import os
import numpy as np
from tensorflow.keras.preprocessing import image
from keras.models import Model
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import image_dataset_from_directory
BATCH_SIZE = 32
path_train = "frontal_lateral/train"
path_val = "frontal_lateral/validation"
path_test = "frontal_lateral/test"
n_rows = 160
n_cols = 160
# Build the train/validation/test datasets from their directories with one
# shared configuration: shuffled, batched, images resized to 160x160.
_dataset_kwargs = dict(
    shuffle=True,
    batch_size=BATCH_SIZE,
    image_size=(n_rows, n_cols),
)
train_dataset = image_dataset_from_directory(path_train, **_dataset_kwargs)
validation_dataset = image_dataset_from_directory(path_val, **_dataset_kwargs)
test_dataset = image_dataset_from_directory(path_test, **_dataset_kwargs)
# Class labels are inferred from the sub-directory names.
class_names = train_dataset.class_names
print('Etiquetas encontradas: ', class_names)
import matplotlib.pyplot as plt
# Show a 3x3 grid of sample training images with their class as the title.
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for i in range(9):
        # FIX: the Axes object returned by plt.subplot was bound to an
        # unused local (`ax`); drop the dead assignment.
        plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        # Label 0 -> 'Frontal', 1 -> 'Lateral' (presumably matching the
        # alphabetical directory order reported in class_names — confirm).
        if labels[i] == 0:
            plt.title('Frontal')
        elif labels[i] == 1:
            plt.title('Lateral')
        plt.axis("off")
# Prefetch so the input pipeline overlaps with training and does not stall it.
AUTOTUNE = tf.data.AUTOTUNE
train_dataset, validation_dataset, test_dataset = (
    ds.prefetch(buffer_size=AUTOTUNE)
    for ds in (train_dataset, validation_dataset, test_dataset)
)
# Inspect the min/max pixel intensities of one image before any rescaling.
for images, labels in train_dataset.take(1):
    sample = images[0].numpy()
    print('Intensidad mínima: ', sample.min())
    print('Intensidad máxima: ', sample.max())
def scaling_normalization(image, label):
    """Rescale pixel values to the [0, 1] range as float32.

    FIX: cast to float32 *before* dividing. The original divided first and
    cast afterwards, which only works because image_dataset_from_directory
    yields float32 tensors; casting first also makes the function safe for
    integer-dtype (e.g. uint8) images, with identical results for floats.
    """
    image = tf.cast(image, tf.float32) / 255.
    return image, label
# Apply the [0, 1] rescaling to all three pipelines.
train_dataset, validation_dataset, test_dataset = (
    ds.map(scaling_normalization)
    for ds in (train_dataset, validation_dataset, test_dataset)
)
def create_model(input_shape=(160, 160, 3)):
    """Build a small uncompiled CNN for binary image classification.

    Architecture: three Conv2D/ReLU stages (16, 32, 64 filters, 3x3,
    stride 1, same padding) with 2x2 max-pooling after the first two,
    then Flatten -> Dense(32, relu) -> Dense(1, sigmoid).

    Args:
        input_shape: shape of the input images. Defaults to the
            160x160 RGB images used throughout this script; the original
            hard-coded [160, 160, 3] is now a backward-compatible
            parameter so the architecture can be reused at other sizes.

    Returns:
        An uncompiled tf.keras Model with a single sigmoid output.
    """
    input_layer = layers.Input(shape=input_shape)
    layer_conv = layers.Conv2D(filters=16, kernel_size=(3, 3), strides=(1, 1), padding="same", activation="relu")(input_layer)
    pool = layers.MaxPool2D(pool_size=(2, 2))(layer_conv)
    layer_conv2 = layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), padding="same", activation="relu")(pool)
    pool2 = layers.MaxPool2D(pool_size=(2, 2))(layer_conv2)
    layer_conv3 = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding="same", activation="relu")(pool2)
    flatten = layers.Flatten()(layer_conv3)
    dense_hidden = layers.Dense(32, activation='relu')(flatten)
    dense_output = layers.Dense(1, activation='sigmoid')(dense_hidden)
    model_base = Model(inputs=[input_layer], outputs=[dense_output])
    return model_base
# Build, compile and train the baseline model.
model_base = create_model()
model_base.summary()
model_base.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# FIX: `batch_size` removed from fit(). The inputs are tf.data.Datasets
# that are already batched (BATCH_SIZE=32 at creation time), and Keras
# does not accept batch_size when `x` is a dataset.
history = model_base.fit(train_dataset,
                         epochs=12,
                         validation_data=validation_dataset,
                         verbose=1)
# Método para la visualización de la exactitud obtenida durante el proceso de entrenamiento tanto en entrenamiento como en validación
def plot_losses(history):
    """Plot training vs. validation loss per epoch (Spanish labels)."""
    for key, tag in (('loss', "Entrenamiento"), ('val_loss', "Validación")):
        plt.plot(history.history[key], label=tag)
    plt.ylabel('Pérdidas')
    plt.xlabel('Época')
    plt.legend(loc="upper right")
    plt.title('Pérdidas durante el entrenamiento')
# Método para la visualización de la exactitud obtenida durante el proceso de entrenamiento tanto en entrenamiento como en validación
def plot_accuracy(history):
    """Plot training vs. validation accuracy per epoch (Spanish labels)."""
    for key, tag in (('accuracy', "Entrenamiento"), ('val_accuracy', "Validación")):
        plt.plot(history.history[key], label=tag)
    plt.ylabel('Exactitud')
    plt.xlabel('Época')
    plt.legend(loc="upper right")
    plt.title('Exactitud durante el entrenamiento')
# FIX: draw losses and accuracy on separate figures. The helpers plot on
# the current axes, so calling them back-to-back without plt.figure()
# overlaid the accuracy curves on top of the loss curves.
plt.figure()
plot_losses(history)
plt.figure()
plot_accuracy(history)
# Persist the trained baseline model.
model_base.save('modelo_base.h5')
def tipoImagen(filepath):
    """Load one image, classify it with model_base and print its class.

    Fixes two defects of the original version:
    - the pixels are rescaled to [0, 1] to match the preprocessing used
      at training time (scaling_normalization); feeding raw [0, 255]
      values gives the model inputs it was never trained on;
    - the sigmoid output is a probability in (0, 1), so it is thresholded
      at 0.5 instead of being compared for exact equality with 0 or 1,
      which almost never holds and printed no class at all.
    """
    img = image.load_img(filepath, target_size=(160, 160))
    plt.imshow(img)
    # Batch of one, scaled exactly like the training data.
    X = np.expand_dims(image.img_to_array(img), axis=0) / 255.
    result = model_base.predict(X)
    print(result[0][0])
    if result[0][0] >= 0.5:
        print('Lateral')
    else:
        print('Frontal')
# Sanity-check the classifier on one known frontal and one known lateral image.
for _sample_path in (
    "frontal_lateral/test/frontal/patient01262_study6_view1_frontal.jpg",
    "frontal_lateral/test/lateral/patient00336_study1_view2_lateral.jpg",
):
    tipoImagen(_sample_path)
# NOTE(review): the `!pip3 ...` lines below are IPython/Jupyter shell
# magics — this file is a notebook export and is NOT runnable as plain
# Python while they remain. Install these packages from a shell (or a
# requirements file) instead if converting this to a .py script.
!pip3 install albumentations
!pip3 install git+https://github.com/mjkvaak/ImageDataAugmentor
# Third-party augmentation libraries (Albumentations + a Keras-style
# generator wrapper around it); the star import brings ImageDataAugmentor
# into scope.
import albumentations as A
from ImageDataAugmentor.image_data_augmentor import *
# Image geometry and batching for the augmented pipeline.
IMG_SIZE = (160, 160)
BATCH_SIZE = 32
# Augmentation routine: each transform fires independently with p=0.5.
_augmentations = [
    A.Rotate(limit=40, p=0.5),
    A.RandomBrightnessContrast(p=0.5),
    A.HorizontalFlip(p=0.5),
]
transforms = A.Compose(_augmentations)
# Training-data generator with Albumentations augmentation.
# FIX: the original passed validation_split=0.2 plus subset="training"/
# "validation" even though the data is already split into dedicated
# train/ and validation/ directories. That silently discarded 20% of the
# training images and 80% of the validation images. The split arguments
# are removed so each generator reads its whole directory.
train_datagen = ImageDataAugmentor(
    augment=transforms,
    seed=123)
# Validation-data generator without augmentation.
val_datagen = ImageDataAugmentor(seed=123)
# NOTE(review): unlike the first pipeline (scaling_normalization), these
# generators do not rescale pixels to [0, 1] — confirm whether a rescale
# step was intended here before comparing results across models.
# Training dataset
train_dataset = train_datagen.flow_from_directory(
    path_train,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='sparse',
    shuffle=True)
# Validation dataset
validation_dataset = val_datagen.flow_from_directory(
    path_val,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='sparse',
    shuffle=True)
# Visual sanity check of one batch from each generator.
train_dataset.show_data()
validation_dataset.show_data()
# Train the same architecture on the augmented data.
model_aumento = create_model()
model_aumento.summary()
model_aumento.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# FIX: `batch_size` removed from fit() — the generator already yields
# batches of BATCH_SIZE, and Keras does not accept batch_size when `x`
# is a generator.
history_aumento = model_aumento.fit(train_dataset,
                                    epochs=12,
                                    validation_data=validation_dataset,
                                    verbose=1)
# FIX: one figure per plot so the loss and accuracy curves are not drawn
# on top of each other on the same axes.
plt.figure()
plot_losses(history_aumento)
plt.figure()
plot_accuracy(history_aumento)
# Transfer learning: MobileNetV2 pretrained on ImageNet, used as a frozen
# feature extractor topped with a single sigmoid unit.
IMG_SHAPE = IMG_SIZE + (3,)
base_mobile = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                                include_top=False,
                                                weights='imagenet')
base_mobile.summary()
# Freeze the convolutional base so only the new head is trained.
base_mobile.trainable = False
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
# FIX: renamed the local `input` -> `input_tensor`; the original name
# shadowed the Python builtin input().
input_tensor = layers.Input(shape=(160, 160, 3))
# Preprocessing layer: MobileNetV2's own preprocess_input (scales raw
# pixel values to the [-1, 1] range the pretrained weights expect).
x = preprocess_input(input_tensor)
# Add the base model; training=False keeps its BatchNorm layers in
# inference mode even if the base is later unfrozen.
x = base_mobile(x, training=False)
x = layers.GlobalAveragePooling2D()(x)
# Binary-classification head.
output = layers.Dense(1, activation="sigmoid")(x)
model_tl = Model(inputs=[input_tensor], outputs=[output])
model_tl.summary()
path_models = "./modelos/"
path_experiment = path_models + 'Train/'
# FIX: create the checkpoint directory up front — saving an .h5 file into
# a missing directory makes the checkpoint callback fail mid-training.
os.makedirs(path_experiment, exist_ok=True)
# Compile the transfer-learning model.
model_tl.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy'])
# Checkpoint callback: keep only the weights with the best validation
# accuracy seen so far.
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=path_experiment + 'model.h5',
    monitor='val_accuracy',
    mode='max',
    save_best_only=True,
    verbose=1)
# Train (base frozen; only the new head learns).
history_tl = model_tl.fit(train_dataset,
                          epochs=10,
                          validation_data=validation_dataset,
                          callbacks=[model_checkpoint_callback],
                          verbose=1)
# Learning curves of the transfer-learning model.
plt.figure(figsize=(10, 8))
for series, tag in (('accuracy', 'Entrenamiento'), ('val_accuracy', 'Validación')):
    plt.plot(history_tl.history[series], label=tag)
plt.title('Exactitud entrenamiento vs. validación')
plt.legend()
# Validation accuracy: baseline model vs. transfer learning.
plt.figure(figsize=(10, 8))
for run, tag in ((history, 'Sin trans. conocimiento'),
                 (history_tl, 'Con trans. conocimiento')):
    plt.plot(run.history['val_accuracy'], label=tag)
plt.title('Exactitud sin vs. con transferencia conocimiento (validación)')
plt.legend()
# Persist the transfer-learning model.
model_tl.save("modelo_transf.h5")