Deep Learning Project
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
# Datasets
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.datasets import reuters
# Deep learning (Keras)
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
import shap
(train_data,train_labels),(test_data,test_labels)=fashion_mnist.load_data()
train_data[0]
print('Train: X=%s, y=%s' % (train_data.shape, train_labels.shape))
print('Test: X=%s, y=%s' % (test_data.shape, test_labels.shape))
train_labels
# Display the first nine training images
for i in range(9):
    plt.subplot(330 + 1 + i)
    plt.imshow(train_data[i], cmap=plt.get_cmap('gray'))
plt.show()
Data encoding
# Multi-hot encoding of integer sequences (as used for text datasets such as Reuters)
def one_hot_encode(sequences, vocabulary_size=10000):
    results = np.zeros((len(sequences), vocabulary_size))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1
    return results
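For reference, this is a minimal illustration of what the encoder produces on two tiny toy sequences (illustrative values only):
# toy check: each row gets a 1 at every index present in the corresponding sequence
demo = one_hot_encode([[0, 2], [1, 2, 3]], vocabulary_size=5)
print(demo)
# [[1. 0. 1. 0. 0.]
#  [0. 1. 1. 1. 0.]]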
# Multi-hot encode the data (this helper is written for integer sequences such as
# Reuters word indices; the image labels used by the models below are instead
# one-hot encoded with to_categorical)
x_train = one_hot_encode(train_data)
x_test = one_hot_encode(test_data)
x_train = x_train.T
x_test = x_test.T
# Normalize pixel values to [0, 1]
train_data = train_data / 255.0
test_data = test_data / 255.0
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_data[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
Feedforward Neural Network (DNN)
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
test_labels = to_categorical(test_labels)
model = models.Sequential()
model.add(layers.Flatten(input_shape=(28, 28, 1)))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images[:50000], train_labels[:50000], epochs=10,
                    batch_size=64, validation_data=(train_images[50000:], train_labels[50000:]))
# loss
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.show()
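Since the model was compiled with metrics=['accuracy'], the history also records accuracy; the curves can be plotted the same way (key names assume TensorFlow 2.x):
# accuracy
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.legend()
plt.show()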
model.evaluate(test_images,test_labels)
print("test" ,classification_report(np.argmax(test_labels, axis=1), np.argmax(model.predict(test_images), axis=1)))
Convolutional Neural Network
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
test_labels = to_categorical(test_labels)
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images[:50000], train_labels[:50000], epochs=6,
                    batch_size=64, validation_data=(train_images[50000:], train_labels[50000:]))
# loss
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.show()
model.evaluate(test_images,test_labels)
model.summary()
Prediction
i = 346
plt.gray()
plt.imshow(test_images[i].reshape(28, 28))
plt.show()
print('label: ', test_labels[i])
model.predict(test_images[i:i+1])
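To read the softmax output as a class, the highest-probability index can be mapped back to the class_names list defined earlier (a small sketch, assuming that list is still in scope):
pred = model.predict(test_images[i:i+1])
pred_index = int(np.argmax(pred, axis=1)[0])
true_index = int(np.argmax(test_labels[i]))
print('predicted:', class_names[pred_index], '- true:', class_names[true_index])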
print("test" ,classification_report(np.argmax(test_labels, axis=1), np.argmax(model.predict(test_images), axis=1)))
Image Generator
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
model2 = models.Sequential()
model2.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model2.add(layers.MaxPooling2D(2, 2))
model2.add(layers.Flatten())
model2.add(layers.Dense(64, activation='relu'))
model2.add(layers.Dense(10, activation='softmax'))
model2.compile(optimizer='rmsprop',
               loss='categorical_crossentropy',
               metrics=['accuracy'])
train_datagen = ImageDataGenerator(
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2)
train_datagen.fit(train_images)
#tf.config.run_functions_eagerly(True)
model2.fit(train_datagen.flow(train_images, train_labels, batch_size=32,
                              subset='training', shuffle=True),
           validation_data=train_datagen.flow(train_images, train_labels,
                                              batch_size=8, subset='validation', shuffle=True),
           steps_per_epoch=1500, epochs=10)
model2.evaluate(test_images,test_labels)
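To see what the augmentation actually produces, one batch from the generator can be displayed (a quick sketch; the reshape assumes the (28, 28, 1) image shape used above):
# grab one augmented training batch and show its first 9 images
aug_images, aug_labels = next(train_datagen.flow(train_images, train_labels,
                                                 batch_size=9, subset='training', shuffle=True))
plt.figure(figsize=(6, 6))
for j in range(9):
    plt.subplot(3, 3, j + 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(aug_images[j].reshape(28, 28), cmap=plt.cm.binary)
plt.show()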
Explainability
# select a set of background examples to take an expectation over
background = train_images[np.random.choice(train_images.shape[0], 100, replace=False)]
# explain the model's predictions on the first ten test images
e = shap.DeepExplainer(model, background)
# ...or pass tensors directly
# e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
shap_values = e.shap_values(test_images[:10])
# plot the feature attributions
shap.image_plot(shap_values, -test_images[:10])
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure(figsize=(10, 10))
for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(test_images[i].reshape((28, 28)))
    label_index = np.argmax(test_labels[i])
    plt.title(class_names[label_index] + " " + str(label_index))
plt.show()
Transfer learning
(X_train_data, y_train_data), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
Y_train = np.array(y_train_data)  # integer labels, shape (60000,)
Y_test = np.array(y_test)         # integer labels, shape (10000,)
y_train = tf.keras.utils.to_categorical(y_train_data, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)
X_train = np.array([i.flatten() for i in X_train_data])
X_test = np.array([i.flatten() for i in X_test])
# Convert the images into 3 channels
X_train = np.dstack([X_train] * 3)
X_test = np.dstack([X_test]*3)
print(X_train.shape,X_test.shape)
# Reshape images as per the tensor format required by tensorflow
X_train = X_train.reshape(-1, 28,28,3)
X_test= X_test.reshape (-1,28,28,3)
print(X_train.shape,X_test.shape)
# Resize the images to 48x48 to match the VGG16 input shape used below
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
X_train = np.asarray([img_to_array(array_to_img(im, scale=False).resize((48,48))) for im in X_train])
X_test = np.asarray([img_to_array(array_to_img(im, scale=False).resize((48,48))) for im in X_test])
#train_x = preprocess_input(x)
print(X_train.shape, X_test.shape)
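A quick visual check that the resized three-channel images still look like the originals (uses the first training image and the class_names list defined earlier):
plt.imshow(X_train[0].astype('uint8'))
plt.title(class_names[Y_train[0]])
plt.show()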
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# load the VGG16 convolutional base pretrained on ImageNet, without its top classifier
vgg = VGG16(input_shape=[48, 48, 3], weights='imagenet', include_top=False)
# freeze the pretrained weights
for layer in vgg.layers:
    layer.trainable = False
# our layers - you can add more if you want
x = Flatten()(vgg.output)
#x = tf.keras.layers.MaxPooling2D(2, 2)(x)
#x = Dense(1000, activation='relu')(x)
prediction = Dense(10, activation='softmax')(x)
# create a model object
model = Model(inputs=vgg.input, outputs=prediction)
# view the structure of the model
model.summary()
# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.2)
train_datagen.fit(X_train)
tf.config.run_functions_eagerly(True)
model.fit(train_datagen.flow(X_train, y_train, batch_size=32,
                             subset='training', shuffle=True),
          validation_data=train_datagen.flow(X_train, y_train,
                                             batch_size=8, subset='validation', shuffle=True),
          steps_per_epoch=1500, epochs=10)
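The generator rescales inputs by 1/255, so the held-out images need the same scaling before evaluation; a small sketch mirroring the rescale argument above:
# apply the same 1/255 rescaling the training generator used
X_test_scaled = X_test.astype('float32') / 255.0
model.evaluate(X_test_scaled, y_test, batch_size=32)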