# Notebook environment setup.
# NOTE: the leading '!' lines are IPython shell magics — this file only runs
# inside a Jupyter/Colab notebook, not as a plain Python script.
# Installs ffmpeg + X11 libs required by OpenCV, then a pinned OpenCV build.
!apt-get update
!apt-get install ffmpeg libsm6 libxext6 -y
!pip install opencv-python==4.6.0.66
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import random
import pandas as pd
# List the working directory to confirm the dataset folders are present.
!ls
# --- Configuration -----------------------------------------------------------
# Root folder containing one sub-folder of images per character class.
directorio = r'/work/'
# Class names; each name's list index doubles as its integer label.
# (An earlier 6-class list with Blackpanter/Thor was dropped; removed the
# commented-out copy and the no-op `len(Nombres)` notebook echo.)
Nombres = ["Blackwidow", "Hulk", "Ironman", "Spiderman"]
# Accumulates [image_array, integer_label] pairs loaded from disk.
data = []
# Target square size (pixels) every image is resized to.
talla = 100
# --- Load the dataset --------------------------------------------------------
# Walk each class folder, read every image ONCE (the original read each file
# twice: a grayscale read that was immediately overwritten by a color read,
# plus a duplicated path-join line), convert BGR->RGB so matplotlib displays
# colors correctly (the conversion was present but commented out), resize to
# (talla, talla) and store [image, integer_label] pairs in `data`.
for labelN, Nombre in enumerate(Nombres):
    carpeta = os.path.join(directorio, Nombre)
    for imagen in os.listdir(carpeta):
        imagen_path = os.path.join(carpeta, imagen)
        imgBGR = cv2.imread(imagen_path)
        if imgBGR is None:
            # cv2.imread returns None for unreadable files instead of raising.
            print(f"Could not read {imagen_path}")
            continue
        imagen_array = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
        try:
            imagen_array = cv2.resize(imagen_array, (talla, talla))
            data.append([imagen_array, labelN])
        except Exception as e:
            print(str(e))
# Sanity check: wrap the [image, label] pairs in a DataFrame and count how
# many images each class contributed (column 1 holds the integer labels).
df = pd.DataFrame(data)
freq = df.groupby([1]).count()
print(freq)
# The next two bare expressions are notebook cell echoes (no effect as a script).
Nombres
df.head()
# --- Visual spot-check -------------------------------------------------------
# Shuffle so the 70/30 split below is class-mixed, then display four samples
# with their labels.
random.shuffle(data)
# Derive the sampling step from the dataset size: the original hard-coded
# data[i*300], which raised IndexError whenever len(data) < 901.
# (Also dropped the no-op `Nombres[3]` notebook echo.)
paso = max(1, len(data) // 4)
for i in range(4):
    idx = min(i * paso, len(data) - 1)
    plt.imshow(data[idx][0])
    plt.show()
    print(data[idx][1])
# --- 70/30 train/test split --------------------------------------------------
corte = int(len(data) * 0.7)
data_train = data[:corte]
data_test = data[corte:]
# Unzip the [image, label] pairs of the training portion into parallel arrays.
X_train = np.array([par[0] for par in data_train])
Y_train = np.array([par[1] for par in data_train])
# NOTE(review): tf.keras.utils.normalize applies an L2 normalization along
# axis=1 (per image row), not the usual /255 pixel scaling — kept as in the
# original; confirm this is the intended preprocessing.
X_train = tf.keras.utils.normalize(X_train, axis=1)
# Unzip the held-out portion into parallel arrays, mirroring the train side.
X_test = np.array([par[0] for par in data_test])
Y_test = np.array([par[1] for par in data_test])
# Same row-wise L2 normalization as applied to X_train.
X_test = tf.keras.utils.normalize(X_test, axis=1)
# Give the conv nets an explicit (height, width, channels) = (100, 100, 3) shape.
X_train = X_train.reshape(-1, 100, 100, 3)
X_test = X_test.reshape(-1, 100, 100, 3)
# --- modelo1: baseline CNN ---------------------------------------------------
modelo1 = tf.keras.models.Sequential()
# Declare the input shape on the FIRST layer: the input_shape the original
# passed to a middle Dense layer is silently ignored by Keras.
modelo1.add(tf.keras.layers.Conv2D(100, (5, 5), activation='relu',
                                   input_shape=(talla, talla, 3)))
modelo1.add(tf.keras.layers.MaxPooling2D((3, 3), strides=2))
modelo1.add(tf.keras.layers.Flatten())
modelo1.add(tf.keras.layers.Dense(128, activation='relu'))
modelo1.add(tf.keras.layers.Dense(64, activation='relu'))
# softmax, not sigmoid: sparse_categorical_crossentropy expects the outputs
# to form a probability distribution over the classes.
modelo1.add(tf.keras.layers.Dense(len(Nombres), activation='softmax'))
modelo1.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
modelo1.fit(X_train, Y_train, epochs=5)
modelo1.evaluate(X_test, Y_test)
# --- modelo3: shallower variant with a larger kernel -------------------------
modelo3 = tf.keras.models.Sequential()
modelo3.add(tf.keras.layers.Conv2D(100, (10, 10), activation='relu',
                                   input_shape=(talla, talla, 3)))
# A (1,1) pool with stride 2 performs no pooling — it just subsamples every
# other activation.
modelo3.add(tf.keras.layers.MaxPooling2D((1, 1), strides=2))
modelo3.add(tf.keras.layers.Flatten())
modelo3.add(tf.keras.layers.Dense(128, activation='relu'))
# One output unit per class: the hard-coded 9 was a leftover from the dropped
# 6-class label set; only len(Nombres)=4 classes exist here.
modelo3.add(tf.keras.layers.Dense(len(Nombres), activation='softmax'))
modelo3.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
modelo3.fit(X_train, Y_train, epochs=1)
modelo3.evaluate(X_test, Y_test)
# --- modelo4: baseline CNN + L2 weight decay on the output layer -------------
modelo4 = tf.keras.models.Sequential()
# Input shape belongs on the first layer (the one on the middle Dense layer
# in the original was ignored by Keras).
modelo4.add(tf.keras.layers.Conv2D(100, (5, 5), activation='relu',
                                   input_shape=(talla, talla, 3)))
modelo4.add(tf.keras.layers.MaxPooling2D((3, 3), strides=2))
modelo4.add(tf.keras.layers.Flatten())
modelo4.add(tf.keras.layers.Dense(128, activation='relu'))
modelo4.add(tf.keras.layers.Dense(64, activation='relu'))
# softmax, not sigmoid, to match sparse_categorical_crossentropy; keep the
# L2 weight-decay regularization this variant is meant to test.
modelo4.add(tf.keras.layers.Dense(len(Nombres), activation='softmax',
                                  kernel_regularizer=tf.keras.regularizers.l2(0.001)))
modelo4.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
modelo4.fit(X_train, Y_train, epochs=5)
modelo4.evaluate(X_test, Y_test)
# --- modelo6: baseline CNN + dropout regularization --------------------------
modelo6 = tf.keras.models.Sequential()
# Input shape on the first layer (the one on the middle Dense layer in the
# original was ignored by Keras).
modelo6.add(tf.keras.layers.Conv2D(100, (5, 5), activation='relu',
                                   input_shape=(talla, talla, 3)))
modelo6.add(tf.keras.layers.MaxPooling2D((3, 3), strides=2))
modelo6.add(tf.keras.layers.Flatten())
modelo6.add(tf.keras.layers.Dense(128, activation='relu'))
modelo6.add(tf.keras.layers.Dense(64, activation='relu'))
# Dropout regularization is the variant under test here.
modelo6.add(tf.keras.layers.Dropout(0.5))
# softmax, not sigmoid, to match sparse_categorical_crossentropy.
modelo6.add(tf.keras.layers.Dense(len(Nombres), activation='softmax'))
modelo6.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
modelo6.fit(X_train, Y_train, epochs=1)
modelo6.evaluate(X_test, Y_test)
# --- Single-image prediction -------------------------------------------------
imagen = cv2.imread('/work/Ironman/Iron Man (1).jpg')
# Convert BGR->RGB for display only, so matplotlib shows true colors.
plt.imshow(cv2.cvtColor(imagen, cv2.COLOR_BGR2RGB))
plt.show()
# The models were trained on 3-channel (talla, talla, 3) inputs; the original
# grayscale conversion + (-1, talla, talla, 1) reshape produced a shape the
# networks cannot accept. Keep all 3 channels end-to-end.
imagen = cv2.resize(imagen, (talla, talla), interpolation=cv2.INTER_AREA)
imagen = tf.keras.utils.normalize(imagen, axis=1)
plt.imshow(imagen)
plt.show()
imagen = imagen.reshape(-1, talla, talla, 3)
print(Nombres[int(np.argmax(modelo4.predict(imagen)))])
# --- Spot-check modelo1 on the first 30 test images --------------------------
for i in range(30):
    # Index from 0: the original's X_test[i+1] skipped the first sample and
    # overran the array by one at the end.
    imagen = X_test[i]
    plt.imshow(imagen)
    plt.show()
    # Keep all 3 channels: reshaping a (talla, talla, 3) sample to
    # (-1, talla, talla, 1) produced a batch of 3 single-channel images,
    # which the color-trained model cannot consume.
    lote = imagen.reshape(1, talla, talla, 3)
    print(Nombres[int(np.argmax(modelo1.predict(lote)))])
# --- Load out-of-distribution ("fake") images --------------------------------
# Store [model_input, display_image] pairs. The model input keeps 3 color
# channels to match the training data (the original stored a grayscale copy
# the color-trained models cannot consume), and each file is read from disk
# once instead of twice. (Dropped the redundant re-assignment talla = 100.)
datafake = []
carpeta = os.path.join(directorio, 'Fake')
for imagen in os.listdir(carpeta):
    imagen_path = os.path.join(carpeta, imagen)
    imgBGR = cv2.imread(imagen_path)
    if imgBGR is None:
        # cv2.imread returns None for unreadable files instead of raising.
        print(f"Could not read {imagen_path}")
        continue
    imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
    try:
        imagen_array = cv2.resize(imgRGB, (talla, talla))
        datafake.append([imagen_array, imgRGB])
    except Exception as e:
        print(str(e))
# --- Predict on the fake images ----------------------------------------------
for i in range(len(datafake)):
    plt.imshow(datafake[i][1])
    plt.show()
    imagen = cv2.resize(datafake[i][0], (talla, talla), interpolation=cv2.INTER_AREA)
    # The model expects 3 channels; if the loader stored a grayscale copy,
    # replicate it across channels instead of reshaping to (..., 1), which
    # broke predict() against the color-trained network.
    if imagen.ndim == 2:
        imagen = np.stack([imagen] * 3, axis=-1)
    imagen = tf.keras.utils.normalize(imagen, axis=1)
    imagen = imagen.reshape(1, talla, talla, 3)
    print(Nombres[int(np.argmax(modelo1.predict(imagen)))])
# Re-print the per-class image counts (column 1 = integer label) for reference;
# same computation as the earlier frequency check.
freq = df.groupby([1]).count()
print(freq)