from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# Load CIFAR-10: 50k train / 10k test RGB images, shape (32, 32, 3), uint8 in [0, 255].
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Show a raw sample — imshow handles uint8 [0, 255] natively, no warning here.
plt.imshow(x_train[462])
plt.show()

# Standardize with TRAIN-set statistics only; reusing them on the test set
# avoids information leakage from test data into preprocessing.
train_mean = np.mean(x_train)
train_std = np.std(x_train)
x_train_n = (x_train - train_mean) / train_std
x_test_n = (x_test - train_mean) / train_std

# Standardized floats lie outside imshow's valid [0, 1] float range, which
# previously produced the "Clipping input data" warning. Min-max rescale a
# copy purely for display; the training arrays are left standardized.
sample = x_train_n[462]
plt.imshow((sample - sample.min()) / (sample.max() - sample.min()))
plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras import Sequential
import pandas as pd
import numpy as np
# Count the distinct class labels (CIFAR-10 → 10 classes).
df = pd.DataFrame(y_train, columns=['train'])
df['train'].unique().shape

# Baseline network: one conv layer feeding a small fully connected head.
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=[32, 32, 3]),
    Flatten(),
    Dense(300, activation='relu'),
    Dense(10),  # raw logits; softmax is applied inside the loss (from_logits=True)
])
model.compile(
    optimizer='sgd',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)
x_train_n.shape
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 30, 30, 32) 896
flatten (Flatten) (None, 28800) 0
dense (Dense) (None, 300) 8640300
dense_1 (Dense) (None, 10) 3010
=================================================================
Total params: 8,644,206
Trainable params: 8,644,206
Non-trainable params: 0
_________________________________________________________________
# Train the baseline for 10 epochs on the standardized data. Per the log below,
# train accuracy climbs to ~0.97 while validation accuracy plateaus near 0.63 —
# the model overfits.
model.fit(x_train_n, y_train, epochs=10, batch_size=32, validation_data=(x_test_n, y_test))
Epoch 1/10
1563/1563 [==============================] - 134s 85ms/step - loss: 1.5003 - accuracy: 0.4680 - val_loss: 1.3331 - val_accuracy: 0.5234
Epoch 2/10
1563/1563 [==============================] - 133s 85ms/step - loss: 1.1763 - accuracy: 0.5893 - val_loss: 1.1915 - val_accuracy: 0.5833
Epoch 3/10
1563/1563 [==============================] - 132s 84ms/step - loss: 1.0118 - accuracy: 0.6478 - val_loss: 1.1424 - val_accuracy: 0.6005
Epoch 4/10
1563/1563 [==============================] - 141s 90ms/step - loss: 0.8621 - accuracy: 0.7019 - val_loss: 1.1038 - val_accuracy: 0.6172
Epoch 5/10
1563/1563 [==============================] - 139s 89ms/step - loss: 0.7155 - accuracy: 0.7541 - val_loss: 1.1345 - val_accuracy: 0.6225
Epoch 6/10
1563/1563 [==============================] - 139s 89ms/step - loss: 0.5717 - accuracy: 0.8074 - val_loss: 1.1328 - val_accuracy: 0.6295
Epoch 7/10
1563/1563 [==============================] - 139s 89ms/step - loss: 0.4306 - accuracy: 0.8587 - val_loss: 1.1776 - val_accuracy: 0.6289
Epoch 8/10
1563/1563 [==============================] - 143s 91ms/step - loss: 0.3089 - accuracy: 0.9046 - val_loss: 1.2795 - val_accuracy: 0.6310
Epoch 9/10
1563/1563 [==============================] - 141s 90ms/step - loss: 0.2057 - accuracy: 0.9418 - val_loss: 1.3605 - val_accuracy: 0.6341
Epoch 10/10
1563/1563 [==============================] - 142s 91ms/step - loss: 0.1259 - accuracy: 0.9704 - val_loss: 1.4540 - val_accuracy: 0.6370
# Deeper variant: two stacked conv layers and two 300-unit dense layers,
# no pooling. The second conv shrinks the feature map to 28x28 before Flatten.
model4 = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=[32, 32, 3]),
    Conv2D(32, (3, 3), activation='relu'),
    Flatten(),
    Dense(300, activation='relu'),
    Dense(300, activation='relu'),
    Dense(10),  # logits; loss handles softmax via from_logits=True
])
model4.compile(
    optimizer='sgd',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)
model4.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 30, 30, 32) 896
conv2d_2 (Conv2D) (None, 28, 28, 32) 9248
flatten_1 (Flatten) (None, 25088) 0
dense_2 (Dense) (None, 300) 7526700
dense_3 (Dense) (None, 300) 90300
dense_4 (Dense) (None, 10) 3010
=================================================================
Total params: 7,630,154
Trainable params: 7,630,154
Non-trainable params: 0
_________________________________________________________________
# Train the deeper model with identical settings. Per the log below it also
# overfits (train ~0.97 vs validation ~0.64) and trains noticeably slower
# (~208s/epoch vs ~135s for the baseline).
model4.fit(x_train_n, y_train, epochs=10, batch_size=32, validation_data=(x_test_n, y_test))
Epoch 1/10
1563/1563 [==============================] - 209s 133ms/step - loss: 1.5688 - accuracy: 0.4414 - val_loss: 1.3244 - val_accuracy: 0.5241
Epoch 2/10
1563/1563 [==============================] - 209s 133ms/step - loss: 1.2028 - accuracy: 0.5740 - val_loss: 1.1684 - val_accuracy: 0.5806
Epoch 3/10
1563/1563 [==============================] - 209s 134ms/step - loss: 1.0269 - accuracy: 0.6360 - val_loss: 1.0679 - val_accuracy: 0.6205
Epoch 4/10
1563/1563 [==============================] - 209s 134ms/step - loss: 0.8667 - accuracy: 0.6944 - val_loss: 1.2157 - val_accuracy: 0.5804
Epoch 5/10
1563/1563 [==============================] - 209s 134ms/step - loss: 0.7119 - accuracy: 0.7488 - val_loss: 1.0776 - val_accuracy: 0.6379
Epoch 6/10
1563/1563 [==============================] - 209s 134ms/step - loss: 0.5550 - accuracy: 0.8091 - val_loss: 1.0218 - val_accuracy: 0.6642
Epoch 7/10
1563/1563 [==============================] - 208s 133ms/step - loss: 0.4013 - accuracy: 0.8627 - val_loss: 1.1726 - val_accuracy: 0.6456
Epoch 8/10
1563/1563 [==============================] - 207s 132ms/step - loss: 0.2560 - accuracy: 0.9166 - val_loss: 1.2810 - val_accuracy: 0.6573
Epoch 9/10
1563/1563 [==============================] - 207s 133ms/step - loss: 0.1577 - accuracy: 0.9497 - val_loss: 1.4353 - val_accuracy: 0.6467
Epoch 10/10
1563/1563 [==============================] - 207s 132ms/step - loss: 0.0911 - accuracy: 0.9730 - val_loss: 1.6856 - val_accuracy: 0.6412
# Re-print the baseline model's summary for side-by-side parameter comparison
# with model4 (8.64M vs 7.63M parameters).
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 30, 30, 32) 896
flatten (Flatten) (None, 28800) 0
dense (Dense) (None, 300) 8640300
dense_1 (Dense) (None, 10) 3010
=================================================================
Total params: 8,644,206
Trainable params: 8,644,206
Non-trainable params: 0
_________________________________________________________________
# Pooling variant: 2x2 max-pooling between the conv layers shrinks the
# flattened feature vector (5408 vs 25088 units), cutting parameters to ~1.7M.
# NOTE(review): model2 is only summarized here — it is never compiled or
# trained in this file as shown.
model2 = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=[32, 32, 3]),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), activation='relu'),
    Flatten(),
    Dense(300, activation='relu'),
    Dense(300, activation='relu'),
    Dense(10),
])
model2.summary()
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_3 (Conv2D) (None, 30, 30, 32) 896
max_pooling2d (MaxPooling2D (None, 15, 15, 32) 0
)
conv2d_4 (Conv2D) (None, 13, 13, 32) 9248
flatten_2 (Flatten) (None, 5408) 0
dense_5 (Dense) (None, 300) 1622700
dense_6 (Dense) (None, 300) 90300
dense_7 (Dense) (None, 10) 3010
=================================================================
Total params: 1,726,154
Trainable params: 1,726,154
Non-trainable params: 0
_________________________________________________________________
# Aggressive-pooling variant: a 4x4 max-pool collapses the feature map to 7x7,
# leaving only 800 flattened units and ~344k total parameters.
model3 = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=[32, 32, 3]),
    MaxPooling2D((4, 4)),
    Conv2D(32, (3, 3), activation='relu'),
    Flatten(),
    Dense(300, activation='relu'),
    Dense(300, activation='relu'),
    Dense(10),  # logits; paired with from_logits=True in compile below
])
model3.summary()
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_5 (Conv2D) (None, 30, 30, 32) 896
max_pooling2d_1 (MaxPooling (None, 7, 7, 32) 0
2D)
conv2d_6 (Conv2D) (None, 5, 5, 32) 9248
flatten_3 (Flatten) (None, 800) 0
dense_8 (Dense) (None, 300) 240300
dense_9 (Dense) (None, 300) 90300
dense_10 (Dense) (None, 10) 3010
=================================================================
Total params: 343,754
Trainable params: 343,754
Non-trainable params: 0
_________________________________________________________________
# Compile and train the small pooled model. Per the log below it is far faster
# (~43s/epoch vs 130-200s) and generalizes best of the three trained models
# (val accuracy ~0.70, still rising at epoch 10 with no overfitting gap yet).
model3.compile(optimizer='sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model3.fit(x_train_n, y_train, epochs=10, batch_size=32, validation_data=(x_test_n, y_test))
Epoch 1/10
1563/1563 [==============================] - 42s 27ms/step - loss: 1.6848 - accuracy: 0.3952 - val_loss: 1.5244 - val_accuracy: 0.4558
Epoch 2/10
1563/1563 [==============================] - 42s 27ms/step - loss: 1.3193 - accuracy: 0.5313 - val_loss: 1.3233 - val_accuracy: 0.5106
Epoch 3/10
1563/1563 [==============================] - 42s 27ms/step - loss: 1.1726 - accuracy: 0.5875 - val_loss: 1.1490 - val_accuracy: 0.5877
Epoch 4/10
1563/1563 [==============================] - 42s 27ms/step - loss: 1.0702 - accuracy: 0.6255 - val_loss: 1.1004 - val_accuracy: 0.6147
Epoch 5/10
1563/1563 [==============================] - 42s 27ms/step - loss: 0.9951 - accuracy: 0.6507 - val_loss: 1.0249 - val_accuracy: 0.6430
Epoch 6/10
1563/1563 [==============================] - 50s 32ms/step - loss: 0.9339 - accuracy: 0.6745 - val_loss: 0.9882 - val_accuracy: 0.6602
Epoch 7/10
1563/1563 [==============================] - 43s 28ms/step - loss: 0.8758 - accuracy: 0.6951 - val_loss: 0.9399 - val_accuracy: 0.6722
Epoch 8/10
1563/1563 [==============================] - 46s 29ms/step - loss: 0.8237 - accuracy: 0.7113 - val_loss: 0.9099 - val_accuracy: 0.6836
Epoch 9/10
1563/1563 [==============================] - 43s 28ms/step - loss: 0.7798 - accuracy: 0.7278 - val_loss: 0.8964 - val_accuracy: 0.6866
Epoch 10/10
1563/1563 [==============================] - 43s 27ms/step - loss: 0.7347 - accuracy: 0.7437 - val_loss: 0.8730 - val_accuracy: 0.6984