Projet de Deep Learning
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
# Données
from tensorflow.keras.datasets import fashion_mnist
#DL
from tensorflow.keras.datasets import reuters
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
import shap
# Load the Fashion-MNIST dataset: 28x28 grayscale images, 10 clothing classes.
(train_data,train_labels),(test_data,test_labels)=fashion_mnist.load_data()
# Peek at the raw pixel values (uint8, 0-255) of the first training image.
train_data[0]
# Report the dataset dimensions (60000 train / 10000 test images).
print('Train: X=%s, y=%s' % (train_data.shape, train_labels.shape))
print('Test: X=%s, y=%s' % (test_data.shape, test_labels.shape))
Train: X=(60000, 28, 28), y=(60000,)
Test: X=(10000, 28, 28), y=(10000,)
# Display the integer class labels (0-9) for the training set.
train_labels
# Preview the first nine training images in a 3x3 grid.
for img_idx in range(9):
    plt.subplot(3, 3, img_idx + 1)
    plt.imshow(train_data[img_idx], cmap=plt.get_cmap('gray'))
plt.show()
Encodage des données
def one_hot_encode(sequences, vocabulary_size=10000):
    """Multi-hot encode integer index sequences.

    Parameters
    ----------
    sequences : sequence of int collections
        Each element holds integer indices in ``[0, vocabulary_size)``.
    vocabulary_size : int, optional
        Width of each output row (default 10000).

    Returns
    -------
    numpy.ndarray of shape (len(sequences), vocabulary_size)
        Row ``i`` is 1.0 at every index present in ``sequences[i]`` and 0.0
        elsewhere. Repeated indices are not counted (multi-hot, not a
        bag-of-words count).
    """
    results = np.zeros((len(sequences), vocabulary_size))
    for i, sequence in enumerate(sequences):
        # Fancy indexing sets every listed position of row i in one shot.
        results[i, sequence] = 1
    return results
# NOTE(review): one_hot_encode was written for integer *index* sequences
# (e.g. the Reuters dataset imported above); here it is applied to 28x28
# uint8 image arrays, so each row merely flags which pixel intensities
# (0-255) occur in an image. x_train/x_test are not used again below --
# confirm whether this cell is leftover from another exercise.
x_train = one_hot_encode(train_data)
x_test = one_hot_encode(test_data)
x_train = x_train.T
x_test = x_test.T
# Scale pixel values from [0, 255] to [0, 1] for the plots below.
train_data = train_data/ 255.0
test_data = test_data / 255.0
# Human-readable names for the ten Fashion-MNIST classes (index == label).
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Preview the first 25 training images, each captioned with its class name.
plt.figure(figsize=(10, 10))
for img_idx in range(25):
    plt.subplot(5, 5, img_idx + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_data[img_idx], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[img_idx]])
plt.show()
Feedforward Neural Network (DNN)
# --- Feedforward (dense) network on Fashion-MNIST ---
# Reload the raw data so this section is independent of the cells above.
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Add a channel axis, scale pixels to [0, 1], one-hot encode the labels.
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
train_labels = to_categorical(train_labels)
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
test_labels = to_categorical(test_labels)

# Architecture: Flatten -> Dense(256) -> Dense(128) -> softmax over 10 classes.
model = models.Sequential([
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train on the first 50k images, validate on the remaining 10k.
history = model.fit(train_images[:50000], train_labels[:50000],
                    epochs=10, batch_size=64,
                    validation_data=(train_images[50000:], train_labels[50000:]))
Epoch 1/10
782/782 [==============================] - 14s 16ms/step - loss: 0.6819 - accuracy: 0.7528 - val_loss: 0.7873 - val_accuracy: 0.7648
Epoch 2/10
782/782 [==============================] - 5s 6ms/step - loss: 0.3845 - accuracy: 0.8581 - val_loss: 0.4243 - val_accuracy: 0.8433
Epoch 3/10
782/782 [==============================] - 5s 6ms/step - loss: 0.3451 - accuracy: 0.8745 - val_loss: 0.4360 - val_accuracy: 0.8483
Epoch 4/10
782/782 [==============================] - 5s 6ms/step - loss: 0.3176 - accuracy: 0.8819 - val_loss: 0.4082 - val_accuracy: 0.8610
Epoch 5/10
782/782 [==============================] - 8s 10ms/step - loss: 0.3049 - accuracy: 0.8872 - val_loss: 0.4149 - val_accuracy: 0.8606
Epoch 6/10
782/782 [==============================] - 6s 7ms/step - loss: 0.2854 - accuracy: 0.8949 - val_loss: 0.4256 - val_accuracy: 0.8618
Epoch 7/10
782/782 [==============================] - 5s 6ms/step - loss: 0.2803 - accuracy: 0.8971 - val_loss: 0.5820 - val_accuracy: 0.8230
Epoch 8/10
782/782 [==============================] - 5s 7ms/step - loss: 0.2751 - accuracy: 0.8976 - val_loss: 0.3872 - val_accuracy: 0.8817
Epoch 9/10
782/782 [==============================] - 7s 9ms/step - loss: 0.2597 - accuracy: 0.9052 - val_loss: 0.5044 - val_accuracy: 0.8596
Epoch 10/10
782/782 [==============================] - 9s 11ms/step - loss: 0.2572 - accuracy: 0.9046 - val_loss: 0.4217 - val_accuracy: 0.8743
# Training vs validation loss curves for the dense network.
curves = history.history
plt.plot(curves['loss'], label='train loss')
plt.plot(curves['val_loss'], label='val loss')
plt.legend()
plt.show()

# Final performance on the held-out test set.
model.evaluate(test_images, test_labels)
313/313 [==============================] - 2s 5ms/step - loss: 0.4677 - accuracy: 0.8701
# Per-class precision/recall/F1 for the dense network on the test set.
true_classes = np.argmax(test_labels, axis=1)
pred_classes = np.argmax(model.predict(test_images), axis=1)
print("test", classification_report(true_classes, pred_classes))
test precision recall f1-score support
0 0.87 0.78 0.82 1000
1 0.99 0.97 0.98 1000
2 0.78 0.75 0.77 1000
3 0.83 0.94 0.88 1000
4 0.71 0.84 0.77 1000
5 0.98 0.96 0.97 1000
6 0.71 0.61 0.66 1000
7 0.95 0.93 0.94 1000
8 0.97 0.97 0.97 1000
9 0.93 0.96 0.94 1000
accuracy 0.87 10000
macro avg 0.87 0.87 0.87 10000
weighted avg 0.87 0.87 0.87 10000
Convolutional Neural Network
# --- Convolutional network on Fashion-MNIST ---
# Reload and preprocess exactly as in the dense-network section.
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
train_labels = to_categorical(train_labels)
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
test_labels = to_categorical(test_labels)

# Architecture: Conv(32, 3x3) -> MaxPool(2x2) -> Flatten -> Dense(64) -> softmax(10).
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Same 50k/10k train/validation split as before, fewer epochs.
history = model.fit(train_images[:50000], train_labels[:50000],
                    epochs=6, batch_size=64,
                    validation_data=(train_images[50000:], train_labels[50000:]))
Epoch 1/6
782/782 [==============================] - 37s 46ms/step - loss: 0.6512 - accuracy: 0.7709 - val_loss: 0.3415 - val_accuracy: 0.8774
Epoch 2/6
782/782 [==============================] - 33s 42ms/step - loss: 0.3216 - accuracy: 0.8844 - val_loss: 0.3183 - val_accuracy: 0.8834
Epoch 3/6
782/782 [==============================] - 34s 43ms/step - loss: 0.2555 - accuracy: 0.9079 - val_loss: 0.2579 - val_accuracy: 0.9092
Epoch 4/6
782/782 [==============================] - 38s 49ms/step - loss: 0.2248 - accuracy: 0.9203 - val_loss: 0.2785 - val_accuracy: 0.9027
Epoch 5/6
782/782 [==============================] - 36s 46ms/step - loss: 0.2034 - accuracy: 0.9263 - val_loss: 0.2698 - val_accuracy: 0.9064
Epoch 6/6
782/782 [==============================] - 40s 51ms/step - loss: 0.1870 - accuracy: 0.9343 - val_loss: 0.2654 - val_accuracy: 0.9099
# CNN training vs validation loss curves.
hist = history.history
plt.plot(hist['loss'], label='train loss')
plt.plot(hist['val_loss'], label='val loss')
plt.legend()
plt.show()

# CNN performance on the held-out test set.
model.evaluate(test_images, test_labels)
313/313 [==============================] - 2s 8ms/step - loss: 0.2770 - accuracy: 0.9053
# Print the CNN's layer-by-layer structure and parameter counts.
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 5408) 0
_________________________________________________________________
dense_3 (Dense) (None, 64) 346176
_________________________________________________________________
dense_4 (Dense) (None, 10) 650
=================================================================
Total params: 347,146
Trainable params: 347,146
Non-trainable params: 0
_________________________________________________________________
Prédiction
# Inspect one test example and its one-hot encoded label.
i=346
plt.gray()
plt.imshow(test_images[i])
print('label: ', test_labels[i])
label: [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
# One-hot label of the selected example.
test_labels[i]
# Predicted class probabilities for the same example (batch of one,
# hence the i:i+1 slice which keeps the batch dimension).
model.predict(test_images[i:i+1])
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 5408) 0
_________________________________________________________________
dense_3 (Dense) (None, 64) 346176
_________________________________________________________________
dense_4 (Dense) (None, 10) 650
=================================================================
Total params: 347,146
Trainable params: 347,146
Non-trainable params: 0
_________________________________________________________________
# Per-class precision/recall/F1 for the CNN on the test set.
print("test" ,classification_report(np.argmax(test_labels, axis=1), np.argmax(model.predict(test_images), axis=1)))
test precision recall f1-score support
0 0.87 0.84 0.86 1000
1 1.00 0.97 0.98 1000
2 0.85 0.87 0.86 1000
3 0.85 0.94 0.89 1000
4 0.91 0.78 0.84 1000
5 0.99 0.96 0.98 1000
6 0.72 0.78 0.75 1000
7 0.95 0.98 0.96 1000
8 0.99 0.98 0.98 1000
9 0.96 0.96 0.96 1000
accuracy 0.91 10000
macro avg 0.91 0.91 0.91 10000
weighted avg 0.91 0.91 0.91 10000
Image Generator
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# --- Same CNN architecture, trained with on-the-fly data augmentation ---
model2 = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
model2.compile(optimizer='rmsprop',
               loss='categorical_crossentropy',
               metrics=['accuracy'])

# Random shear/zoom/horizontal-flip augmentation; 20% of the training
# data is held out for validation via validation_split.
train_datagen = ImageDataGenerator(shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.2)
train_datagen.fit(train_images)

model2.fit(train_datagen.flow(train_images, train_labels, batch_size=32,
                              subset='training', shuffle=True),
           validation_data=train_datagen.flow(train_images, train_labels,
                                              batch_size=8, subset='validation',
                                              shuffle=True),
           steps_per_epoch=1500, epochs=10)
Epoch 1/10
1500/1500 [==============================] - 68s 45ms/step - loss: 0.7676 - accuracy: 0.7259 - val_loss: 0.4265 - val_accuracy: 0.8489
Epoch 2/10
1500/1500 [==============================] - 72s 48ms/step - loss: 0.4339 - accuracy: 0.8453 - val_loss: 0.4082 - val_accuracy: 0.8522
Epoch 3/10
1500/1500 [==============================] - 63s 42ms/step - loss: 0.3906 - accuracy: 0.8586 - val_loss: 0.3719 - val_accuracy: 0.8682
Epoch 4/10
1500/1500 [==============================] - 69s 46ms/step - loss: 0.3620 - accuracy: 0.8685 - val_loss: 0.3563 - val_accuracy: 0.8742
Epoch 5/10
1500/1500 [==============================] - 63s 42ms/step - loss: 0.3474 - accuracy: 0.8732 - val_loss: 0.3382 - val_accuracy: 0.8819
Epoch 6/10
1500/1500 [==============================] - 59s 39ms/step - loss: 0.3325 - accuracy: 0.8826 - val_loss: 0.3516 - val_accuracy: 0.8712
Epoch 7/10
1500/1500 [==============================] - 73s 49ms/step - loss: 0.3369 - accuracy: 0.8787 - val_loss: 0.3244 - val_accuracy: 0.8837
Epoch 8/10
1500/1500 [==============================] - 78s 52ms/step - loss: 0.3230 - accuracy: 0.8847 - val_loss: 0.3328 - val_accuracy: 0.8851
Epoch 9/10
1500/1500 [==============================] - 66s 44ms/step - loss: 0.3189 - accuracy: 0.8882 - val_loss: 0.3372 - val_accuracy: 0.8807
Epoch 10/10
1500/1500 [==============================] - 59s 39ms/step - loss: 0.3165 - accuracy: 0.8863 - val_loss: 0.3258 - val_accuracy: 0.8852
# Evaluate the augmentation-trained model on the untouched test set.
model2.evaluate(test_images,test_labels)
313/313 [==============================] - 3s 8ms/step - loss: 0.3917 - accuracy: 0.8895
Explicabilité
# select a set of background examples to take an expectation over
background = train_images[np.random.choice(train_images.shape[0], 100, replace=False)]
# explain predictions of the CNN with DeepSHAP
e = shap.DeepExplainer(model, background)
# ...or pass tensors directly
# e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
# Attributions for the first ten test images (one array per class).
shap_values = e.shap_values(test_images[:10])
# plot the feature attributions (images negated so attributions overlay
# on a light background)
# NOTE(review): shap.image_plot draws the figure and presumably returns
# None, so display(shap_plot) likely shows nothing extra -- verify.
shap_plot = shap.image_plot(shap_values, -test_images[:10])
display(shap_plot)
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor: shape=(100, 28, 28, 1), dtype=float32, numpy=
array([[[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
...,
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]]],
[[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
...,
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]]],
[[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
...,
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]]],
...,
[[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
...,
[[0. ],
[0.47058824],
[1. ],
...,
[0.84705883],
[0.9764706 ],
[0.7882353 ]],
[[0. ],
[0.00784314],
[1. ],
...,
[0.8980392 ],
[1. ],
[0.5803922 ]],
[[0. ],
[0. ],
[0.54509807],
...,
[0.6666667 ],
[0.5568628 ],
[0.00392157]]],
[[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
...,
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]]],
[[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0.00392157],
[0. ],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0.01568628],
[0. ]],
...,
[[0.74509805],
[0.7372549 ],
[0.627451 ],
...,
[0. ],
[0. ],
[0. ]],
[[0. ],
[0.14117648],
[0.3882353 ],
...,
[0.00392157],
[0.00784314],
[0. ]],
[[0. ],
[0. ],
[0. ],
...,
[0. ],
[0. ],
[0. ]]]], dtype=float32)>]
Consider rewriting this model with the Functional API.
Your TensorFlow version is newer than 2.4.0 and so graph support has been removed in eager mode. See PR #1483 for discussion.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
`tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor 'shap_rAnD:0' shape=(200, 28, 28, 1) dtype=float32>]
Consider rewriting this model with the Functional API.
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: [<tf.Tensor: shape=(10, 28, 28, 1), dtype=float32, numpy=
array([[[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
...,
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]]],
[[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
...,
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]]],
[[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
...,
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]]],
...,
[[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
...,
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]]],
[[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
...,
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]]],
[[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
...,
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]],
[[0.],
[0.],
[0.],
...,
[0.],
[0.],
[0.]]]], dtype=float32)>]
Consider rewriting this model with the Functional API.
# Show the ten explained test images with their class names and label indices.
class_names = ['T_shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

plt.figure(figsize=(10, 10))
for img_idx in range(10):
    plt.subplot(2, 5, img_idx + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    # Drop the channel axis so imshow gets a plain 28x28 array.
    plt.imshow(test_images[img_idx].reshape((28, 28)))
    label_index = np.argmax(test_labels[img_idx])
    plt.title(class_names[label_index] + " " + str(label_index))
plt.show()
Transfer learning
# --- Prepare Fashion-MNIST for VGG16 transfer learning ---
(X_train_data, y_train_data), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# One-hot targets for training with categorical cross-entropy.
y_train = tf.keras.utils.to_categorical(y_train_data, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)
# Flatten each 28x28 image to a 784-vector (note: X_test is overwritten in place).
X_train= np.array([i.flatten() for i in X_train_data])
X_test= np.array([i.flatten() for i in X_test])
Y_train= np.array (y_train_data) # integer labels, shape (60000,)
# NOTE(review): y_test was already one-hot encoded above, so this is
# shape (10000, 10), not (10000,). Y_train/Y_test appear unused below.
Y_test = np.array(y_test)
# Convert the images into 3 channels
X_train = np.dstack([X_train] * 3)
X_test = np.dstack([X_test]*3)
print(X_train.shape,X_test.shape)
# Reshape images as per the tensor format required by tensorflow
X_train = X_train.reshape(-1, 28,28,3)
X_test= X_test.reshape (-1,28,28,3)
print(X_train.shape,X_test.shape)
# Resize the images to 48x48 (VGG16 with include_top=False needs >= 32x32)
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
X_train = np.asarray([img_to_array(array_to_img(im, scale=False).resize((48,48))) for im in X_train])
X_test = np.asarray([img_to_array(array_to_img(im, scale=False).resize((48,48))) for im in X_test])
#train_x = preprocess_input(x)
print(X_train.shape, X_test.shape)
(60000, 784, 3) (10000, 784, 3)
(60000, 28, 28, 3) (10000, 28, 28, 3)
Execution error
KernelInterrupted: Execution interrupted by the Jupyter kernel.
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# --- VGG16 feature extractor with a new softmax head ---
# ImageNet-pretrained convolutional base, original classifier removed.
vgg = VGG16(input_shape=[48, 48, 3], weights='imagenet', include_top=False)

# Freeze the pretrained weights; only the new head will be trained.
for layer in vgg.layers:
    layer.trainable = False

# New head: flatten VGG features and map to the 10 Fashion-MNIST classes.
features = Flatten()(vgg.output)
prediction = Dense(10, activation='softmax')(features)

# Assemble and inspect the full model.
model = Model(inputs=vgg.input, outputs=prediction)
model.summary()

# Cross-entropy loss with Adam; accuracy for monitoring.
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
2021-10-10 00:11:21.649886: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set
2021-10-10 00:11:21.652893: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2021-10-10 00:11:21.652914: W tensorflow/stream_executor/cuda/cuda_driver.cc:326] failed call to cuInit: UNKNOWN ERROR (303)
2021-10-10 00:11:21.652932: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (p-f8fabd0e-d964-4ee6-afbc-4ab7cbdf19ec): /proc/driver/nvidia/version does not exist
2021-10-10 00:11:21.653130: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-10-10 00:11:21.653292: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 48, 48, 3)] 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 48, 48, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 48, 48, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 24, 24, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 24, 24, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 24, 24, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 12, 12, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 12, 12, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 12, 12, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 12, 12, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 6, 6, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 6, 6, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 6, 6, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 6, 6, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 3, 3, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 3, 3, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 3, 3, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 3, 3, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 1, 1, 512) 0
_________________________________________________________________
flatten (Flatten) (None, 512) 0
_________________________________________________________________
dense (Dense) (None, 10) 5130
=================================================================
Total params: 14,719,818
Trainable params: 5,130
Non-trainable params: 14,714,688
_________________________________________________________________
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Rescale to [0, 1] and augment; 20% of training data reserved for validation.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.2)
train_datagen.fit(X_train)

tf.config.run_functions_eagerly(True)
# Fine-tune the new head on augmented batches.
model.fit(train_datagen.flow(X_train, y_train, batch_size=32,
                             subset='training', shuffle=True),
          validation_data=train_datagen.flow(X_train, y_train,
                                             batch_size=8, subset='validation',
                                             shuffle=True),
          steps_per_epoch=1500, epochs=10)
/shared-libs/python3.7/py/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py:3504: UserWarning: Even though the tf.config.experimental_run_functions_eagerly option is set, this option does not apply to tf.data functions. tf.data functions are still traced and executed as graphs.
"Even though the tf.config.experimental_run_functions_eagerly "
2021-10-10 00:07:48.756808: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
2021-10-10 00:07:48.762135: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2499995000 Hz
Epoch 1/10
23/3000 [..............................] - ETA: 1:31:11 - loss: 2.6355 - accuracy: 0.0928
Execution error
KeyboardInterrupt:
Execution error
KernelInterrupted: Execution interrupted by the Jupyter kernel.