import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras import layers, models
import tensorflow.keras.backend as K
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
from pathlib import Path
root_path = "lung_segmentation"
N_ROWS = 160
N_COLS = 160
N_LABELS = 1
def read_image(image_path, mask_path):
# Leemos la imagen del directorio
image = tf.io.read_file(image_path)
# Decodificamos la imagen en función del tipo de codificación (PNG) en nuestro caso
image = tf.image.decode_png(image, channels=3)
# Convertimos la imagen a float32 --> Este método al mismo tiempo, al especificarle tipo de dato float hace el escalado a [0,1]
image = tf.image.convert_image_dtype(image, tf.float32)
# Redimensionamos la imagen al tamaño deseado
image = tf.image.resize(image, (N_ROWS, N_COLS), method='nearest')
# Leemos la máscara del directorio
mask = tf.io.read_file(mask_path)
# Decodificamos la imagen en función del tipo de codificación (PNG) en nuestro caso
mask = tf.image.decode_png(mask, channels=1)
# Las máscaras están guardadas de manera que el número de la etiqueta (0-12) se encuentra en el primer canal
# Por lo tanto, nos quedamos con el valor máximo de cada canal para crear una matriz de tamaño [FILAS, COLUMNAS, 1]
mask = tf.math.reduce_max(mask, axis=-1, keepdims=True)
# Redimensionamos la imagen al tamaño deseado
mask = tf.image.resize(mask, (N_ROWS, N_COLS), method='nearest')
mask = mask / 255
mask = tf.cast(mask, 'uint8')
return image, mask
def dataset_generator(image_paths, mask_paths, buffer_size, batch_size):
image_list = tf.constant(image_paths)
mask_list = tf.constant(mask_paths)
dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
# A cada elemento del dataset (directorios imagenes y máscaras) les aplicamos el método read_image
dataset = dataset.map(read_image, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.cache().shuffle(buffer_size).batch(batch_size)
return dataset
img_dir_train = 'lung_segmentation/train/images'
mask_dir_train = 'lung_segmentation/train/masks'
train_image_paths = sorted(glob.glob(os.path.join(img_dir_train, '*.png')))
train_mask_paths = sorted(glob.glob(os.path.join(mask_dir_train, '*.png')))
img_dir_val = 'lung_segmentation/validation/images'
mask_dir_val = 'lung_segmentation/validation/masks'
val_image_paths = sorted(glob.glob(os.path.join(img_dir_val, '*.png')))
val_mask_paths = sorted(glob.glob(os.path.join(mask_dir_val, '*.png')))
img_dir_test = 'lung_segmentation/test/images'
mask_dir_test = 'lung_segmentation/test/masks'
test_image_paths = sorted(glob.glob(os.path.join(img_dir_val, '*.png')))
test_mask_paths = sorted(glob.glob(os.path.join(mask_dir_val, '*.png')))
print('Número de imágenes de entrenamiento: ', len(train_image_paths))
print('Número de imágenes de validación: ', len(val_image_paths))
print('Número de imágenes de test: ', len(test_image_paths))
Número de imágenes de entrenamiento: 2000
Número de imágenes de validación: 319
Número de imágenes de test: 319
BATCH_SIZE = 32
BUFFER_SIZE = 500
train_dataset = dataset_generator(train_image_paths, train_mask_paths, BUFFER_SIZE, BATCH_SIZE)
validation_dataset = dataset_generator(val_image_paths, val_mask_paths, BUFFER_SIZE, BATCH_SIZE)
test_dataset = dataset_generator(test_image_paths, test_mask_paths, BUFFER_SIZE, BATCH_SIZE)
for images, masks in train_dataset.take(1):
for i in range(3):
image, mask = images[i], masks[i]
plt.figure(figsize=(20,8))
plt.subplot(121)
plt.imshow(image)
plt.axis('off')
plt.title('Imagen')
plt.subplot(122)
plt.imshow(mask[:,:,0])
plt.axis('off')
plt.title('Máscara')
plt.show()
2022-05-21 12:57:45.541787: W tensorflow/core/kernels/data/cache_dataset_ops.cc:757] The calling iterator did not fully read the dataset being cached. In order to avoid unexpected truncation of the dataset, the partially cached contents of the dataset will be discarded. This can happen if you have an input pipeline similar to `dataset.cache().take(k).repeat()`. You should use `dataset.take(k).cache().repeat()` instead.
IMAGE_SHAPE = (N_ROWS, N_COLS, 3)
ACTIVATION = 'sigmoid'
def get_unet():
# Encoding phase
inputs = layers.Input(IMAGE_SHAPE)
conv1 = layers.Conv2D(32, (3, 3), padding='same')(inputs)
conv1 = layers.BatchNormalization()(conv1)
conv1 = layers.Activation('relu')(conv1)
conv1 = layers.Conv2D(32, (3, 3), padding='same')(conv1)
conv1 = layers.BatchNormalization()(conv1)
conv1 = layers.Activation('relu')(conv1)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = layers.Conv2D(64, (3, 3), padding='same')(pool1)
conv2 = layers.BatchNormalization()(conv2)
conv2 = layers.Activation('relu')(conv2)
conv2 = layers.Conv2D(64, (3, 3), padding='same')(conv2)
conv2 = layers.BatchNormalization()(conv2)
conv2 = layers.Activation('relu')(conv2)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = layers.Conv2D(128, (3, 3), padding='same')(pool2)
conv3 = layers.BatchNormalization()(conv3)
conv3 = layers.Activation('relu')(conv3)
conv3 = layers.Conv2D(128, (3, 3), padding='same')(conv3)
conv3 = layers.BatchNormalization()(conv3)
conv3 = layers.Activation('relu')(conv3)
pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = layers.Conv2D(256, (3, 3), padding='same')(pool3)
conv4 = layers.BatchNormalization()(conv4)
conv4 = layers.Activation('relu')(conv4)
conv4 = layers.Conv2D(256, (3, 3), padding='same')(conv4)
conv4 = layers.BatchNormalization()(conv4)
conv4 = layers.Activation('relu')(conv4)
pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)
# Decoding phase
conv5 = layers.Conv2D(512, (3, 3), padding='same')(pool4)
conv5 = layers.BatchNormalization()(conv5)
conv5 = layers.Activation('relu')(conv5)
conv5 = layers.Conv2D(512, (3, 3), padding='same')(conv5)
conv5 = layers.BatchNormalization()(conv5)
conv5 = layers.Activation('relu')(conv5)
up6 = layers.concatenate([layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = layers.Conv2D(256, (3, 3), padding='same')(up6)
conv6 = layers.BatchNormalization()(conv6)
conv6 = layers.Activation('relu')(conv6)
conv6 = layers.Conv2D(256, (3, 3), padding='same')(conv6)
conv6 = layers.BatchNormalization()(conv6)
conv6 = layers.Activation('relu')(conv6)
up7 = layers.concatenate([layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = layers.Conv2D(128, (3, 3), padding='same')(up7)
conv7 = layers.BatchNormalization()(conv7)
conv7 = layers.Activation('relu')(conv7)
conv7 = layers.Conv2D(128, (3, 3), padding='same')(conv7)
conv7 = layers.BatchNormalization()(conv7)
conv7 = layers.Activation('relu')(conv7)
up8 = layers.concatenate([layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = layers.Conv2D(64, (3, 3), padding='same')(up8)
conv8 = layers.BatchNormalization()(conv8)
conv8 = layers.Activation('relu')(conv8)
conv8 = layers.Conv2D(64, (3, 3), padding='same')(conv8)
conv8 = layers.BatchNormalization()(conv8)
conv8 = layers.Activation('relu')(conv8)
up9 = layers.concatenate([layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = layers.Conv2D(32, (3, 3), padding='same')(up9)
conv9 = layers.BatchNormalization()(conv9)
conv9 = layers.Activation('relu')(conv9)
conv9 = layers.Conv2D(32, (3, 3), padding='same')(conv9)
conv9 = layers.BatchNormalization()(conv9)
conv9 = layers.Activation('relu')(conv9)
out = layers.Conv2D(1, (1, 1))(conv9)
# Output
output = layers.Activation(ACTIVATION)(out)
# Compile model with inputs and outputs
model = models.Model(inputs=[inputs], outputs=[output])
return model
model_unet = get_unet()
model_unet.summary()
Model: "model_3"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_4 (InputLayer) [(None, 160, 160, 3) 0
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 160, 160, 32) 896 input_4[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 160, 160, 32) 128 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 160, 160, 32) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 160, 160, 32) 9248 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 160, 160, 32) 128 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 160, 160, 32) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
max_pooling2d_12 (MaxPooling2D) (None, 80, 80, 32) 0 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 80, 80, 64) 18496 max_pooling2d_12[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 80, 80, 64) 256 conv2d_59[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 80, 80, 64) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 80, 80, 64) 36928 activation_59[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 80, 80, 64) 256 conv2d_60[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 80, 80, 64) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
max_pooling2d_13 (MaxPooling2D) (None, 40, 40, 64) 0 activation_60[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 40, 40, 128) 73856 max_pooling2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 40, 40, 128) 512 conv2d_61[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 40, 40, 128) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 40, 40, 128) 147584 activation_61[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 40, 40, 128) 512 conv2d_62[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 40, 40, 128) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
max_pooling2d_14 (MaxPooling2D) (None, 20, 20, 128) 0 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 20, 20, 256) 295168 max_pooling2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 20, 20, 256) 1024 conv2d_63[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 20, 20, 256) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 20, 20, 256) 590080 activation_63[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 20, 20, 256) 1024 conv2d_64[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 20, 20, 256) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
max_pooling2d_15 (MaxPooling2D) (None, 10, 10, 256) 0 activation_64[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 10, 10, 512) 1180160 max_pooling2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 10, 10, 512) 2048 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 10, 10, 512) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 10, 10, 512) 2359808 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 10, 10, 512) 2048 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 10, 10, 512) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
conv2d_transpose_12 (Conv2DTran (None, 20, 20, 256) 524544 activation_66[0][0]
__________________________________________________________________________________________________
concatenate_12 (Concatenate) (None, 20, 20, 512) 0 conv2d_transpose_12[0][0]
activation_64[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 20, 20, 256) 1179904 concatenate_12[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 20, 20, 256) 1024 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 20, 20, 256) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 20, 20, 256) 590080 activation_67[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 20, 20, 256) 1024 conv2d_68[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 20, 20, 256) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_transpose_13 (Conv2DTran (None, 40, 40, 128) 131200 activation_68[0][0]
__________________________________________________________________________________________________
concatenate_13 (Concatenate) (None, 40, 40, 256) 0 conv2d_transpose_13[0][0]
activation_62[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 40, 40, 128) 295040 concatenate_13[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 40, 40, 128) 512 conv2d_69[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 40, 40, 128) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 40, 40, 128) 147584 activation_69[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 40, 40, 128) 512 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 40, 40, 128) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
conv2d_transpose_14 (Conv2DTran (None, 80, 80, 64) 32832 activation_70[0][0]
__________________________________________________________________________________________________
concatenate_14 (Concatenate) (None, 80, 80, 128) 0 conv2d_transpose_14[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 80, 80, 64) 73792 concatenate_14[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 80, 80, 64) 256 conv2d_71[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 80, 80, 64) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 80, 80, 64) 36928 activation_71[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 80, 80, 64) 256 conv2d_72[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 80, 80, 64) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
conv2d_transpose_15 (Conv2DTran (None, 160, 160, 32) 8224 activation_72[0][0]
__________________________________________________________________________________________________
concatenate_15 (Concatenate) (None, 160, 160, 64) 0 conv2d_transpose_15[0][0]
activation_58[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 160, 160, 32) 18464 concatenate_15[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 160, 160, 32) 128 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 160, 160, 32) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 160, 160, 32) 9248 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 160, 160, 32) 128 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 160, 160, 32) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 160, 160, 1) 33 activation_74[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 160, 160, 1) 0 conv2d_75[0][0]
==================================================================================================
Total params: 7,771,873
Trainable params: 7,765,985
Non-trainable params: 5,888
__________________________________________________________________________________________________
# smooth para evitar dividir por 0
smooth = 1.
def dice_coef(targets, inputs):
inputs = K.flatten(inputs)
targets = K.flatten(targets)
intersection = K.sum(targets * inputs)
dice = (2*intersection + smooth) / (K.sum(targets) + K.sum(inputs) + smooth)
return dice
def iou(targets, inputs):
inputs = K.flatten(inputs)
targets = K.flatten(targets)
intersection = K.sum(targets * inputs)
total = K.sum(targets) + K.sum(inputs)
union = total - intersection
IoU = (intersection + smooth) / (union + smooth)
return IoU
model_unet.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', dice_coef, iou])
path_models = root_path + '/segmentation_binaria/' + 'models'
path_experiment = path_models + 'Train'
EPOCHS = 12
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=path_experiment + 'model.h5',
monitor='dice_coef',
mode='max',
save_best_only=True,
verbose=1)
history_unet = model_unet.fit(train_dataset,
epochs=EPOCHS,
validation_data=validation_dataset,
callbacks=[model_checkpoint_callback],
verbose=1)
Epoch 1/12
63/63 [==============================] - 33s 521ms/step - loss: 0.0513 - accuracy: 0.9807 - dice_coef: 0.9373 - iou: 0.8820 - val_loss: 0.1809 - val_accuracy: 0.9281 - val_dice_coef: 0.8216 - val_iou: 0.6974
Epoch 00001: dice_coef improved from -inf to 0.93727, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 2/12
63/63 [==============================] - 32s 512ms/step - loss: 0.0437 - accuracy: 0.9832 - dice_coef: 0.9473 - iou: 0.8999 - val_loss: 0.5095 - val_accuracy: 0.8340 - val_dice_coef: 0.7374 - val_iou: 0.5842
Epoch 00002: dice_coef improved from 0.93727 to 0.94730, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 3/12
63/63 [==============================] - 32s 511ms/step - loss: 0.0384 - accuracy: 0.9849 - dice_coef: 0.9538 - iou: 0.9117 - val_loss: 0.1641 - val_accuracy: 0.9446 - val_dice_coef: 0.8622 - val_iou: 0.7581
Epoch 00003: dice_coef improved from 0.94730 to 0.95377, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 4/12
63/63 [==============================] - 32s 514ms/step - loss: 0.0334 - accuracy: 0.9867 - dice_coef: 0.9600 - iou: 0.9231 - val_loss: 0.1245 - val_accuracy: 0.9568 - val_dice_coef: 0.8934 - val_iou: 0.8078
Epoch 00004: dice_coef improved from 0.95377 to 0.96000, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 5/12
63/63 [==============================] - 33s 520ms/step - loss: 0.0303 - accuracy: 0.9878 - dice_coef: 0.9638 - iou: 0.9302 - val_loss: 0.0645 - val_accuracy: 0.9758 - val_dice_coef: 0.9382 - val_iou: 0.8837
Epoch 00005: dice_coef improved from 0.96000 to 0.96383, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 6/12
63/63 [==============================] - 33s 520ms/step - loss: 0.0279 - accuracy: 0.9886 - dice_coef: 0.9667 - iou: 0.9355 - val_loss: 0.0536 - val_accuracy: 0.9794 - val_dice_coef: 0.9466 - val_iou: 0.8986
Epoch 00006: dice_coef improved from 0.96383 to 0.96668, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 7/12
63/63 [==============================] - 32s 516ms/step - loss: 0.0257 - accuracy: 0.9894 - dice_coef: 0.9694 - iou: 0.9406 - val_loss: 0.0490 - val_accuracy: 0.9815 - val_dice_coef: 0.9530 - val_iou: 0.9103
Epoch 00007: dice_coef improved from 0.96668 to 0.96941, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 8/12
63/63 [==============================] - 32s 511ms/step - loss: 0.0238 - accuracy: 0.9902 - dice_coef: 0.9715 - iou: 0.9446 - val_loss: 0.0457 - val_accuracy: 0.9829 - val_dice_coef: 0.9573 - val_iou: 0.9182
Epoch 00008: dice_coef improved from 0.96941 to 0.97152, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 9/12
63/63 [==============================] - 32s 512ms/step - loss: 0.0219 - accuracy: 0.9909 - dice_coef: 0.9739 - iou: 0.9492 - val_loss: 0.0701 - val_accuracy: 0.9756 - val_dice_coef: 0.9433 - val_iou: 0.8928
Epoch 00009: dice_coef improved from 0.97152 to 0.97395, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
Epoch 10/12
63/63 [==============================] - 32s 512ms/step - loss: 0.0241 - accuracy: 0.9899 - dice_coef: 0.9717 - iou: 0.9450 - val_loss: 0.1654 - val_accuracy: 0.9596 - val_dice_coef: 0.9113 - val_iou: 0.8378
Epoch 00010: dice_coef did not improve from 0.97395
Epoch 11/12
63/63 [==============================] - 32s 512ms/step - loss: 0.0273 - accuracy: 0.9886 - dice_coef: 0.9681 - iou: 0.9381 - val_loss: 0.0562 - val_accuracy: 0.9810 - val_dice_coef: 0.9566 - val_iou: 0.9168
Epoch 00011: dice_coef did not improve from 0.97395
Epoch 12/12
63/63 [==============================] - 32s 511ms/step - loss: 0.0209 - accuracy: 0.9912 - dice_coef: 0.9752 - iou: 0.9515 - val_loss: 0.0393 - val_accuracy: 0.9851 - val_dice_coef: 0.9638 - val_iou: 0.9301
Epoch 00012: dice_coef improved from 0.97395 to 0.97517, saving model to lung_segmentation/segmentation_binaria/modelsTrainmodel.h5
# Representamos la exactitud, coeficiente DICE e IOU en entrenamiento y validación
def show_learning_curves(history):
plt.figure(figsize=(20,5))
# Exactitud
plt.subplot(1, 3, 1)
plt.plot(history.history['accuracy'], label='Entrenamiento')
plt.plot(history.history['val_accuracy'], label='Validación')
plt.xlabel('Época')
plt.ylabel('Exactitud')
plt.title('Exactitud')
plt.ylim([0, 1])
plt.legend()
# Coeficiente DICE
plt.subplot(1, 3, 2)
plt.plot(history.history['dice_coef'], label='Entrenamiento')
plt.plot(history.history['val_dice_coef'], label='Validación')
plt.xlabel('Época')
plt.ylabel('DC')
plt.title('Coeficiente Dice')
plt.ylim([0, 1])
plt.legend()
# Intersección sobre la unión
plt.subplot(1, 3, 3)
plt.plot(history.history['iou'], label='Entrenamiento')
plt.plot(history.history['val_iou'], label='Validación')
plt.xlabel('Época')
plt.ylabel('IOU')
plt.title('Intersección sobre la unión')
plt.ylim([0, 1])
plt.legend()
show_learning_curves(history_unet)
# Observamos visualmente la predicciones comparadas con las segmentaciones de referencia en algunas imágenes del set de test
def show_prediction(model):
for images, masks in test_dataset.take(1):
pred_masks = model.predict(images)
for i in range(3):
image, mask = images[i], masks[i]
pred_mask = pred_masks[i]
plt.figure(figsize=(20,10))
plt.subplot(131)
plt.imshow(image)
plt.axis('off')
plt.title('Imagen')
plt.subplot(132)
plt.imshow(mask[:,:,-1])
plt.axis('off')
plt.title('Máscara real')
plt.subplot(133)
plt.imshow(pred_mask[:,:,-1])
plt.axis('off')
plt.title('Máscara predicha')
plt.show()
show_prediction(model_unet)
model_unet.save("model_unet.h5")
! pip3 install -U segmentation-models
Collecting segmentation-models
Downloading segmentation_models-1.0.1-py3-none-any.whl (33 kB)
Collecting keras-applications<=1.0.8,>=1.0.7
Downloading Keras_Applications-1.0.8-py3-none-any.whl (50 kB)
|████████████████████████████████| 50 kB 8.2 MB/s eta 0:00:01
Collecting efficientnet==1.0.0
Downloading efficientnet-1.0.0-py3-none-any.whl (17 kB)
Collecting image-classifiers==1.0.0
Downloading image_classifiers-1.0.0-py3-none-any.whl (19 kB)
Requirement already satisfied, skipping upgrade: h5py in /home/julianmelero/.local/lib/python3.8/site-packages (from keras-applications<=1.0.8,>=1.0.7->segmentation-models) (2.10.0)
Requirement already satisfied, skipping upgrade: numpy>=1.9.1 in /home/julianmelero/.local/lib/python3.8/site-packages (from keras-applications<=1.0.8,>=1.0.7->segmentation-models) (1.19.5)
Requirement already satisfied, skipping upgrade: scikit-image in /home/julianmelero/.local/lib/python3.8/site-packages (from efficientnet==1.0.0->segmentation-models) (0.19.2)
Requirement already satisfied, skipping upgrade: six in /home/julianmelero/.local/lib/python3.8/site-packages (from h5py->keras-applications<=1.0.8,>=1.0.7->segmentation-models) (1.15.0)
Requirement already satisfied, skipping upgrade: networkx>=2.2 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (2.6.3)
Requirement already satisfied, skipping upgrade: imageio>=2.4.1 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (2.19.2)
Requirement already satisfied, skipping upgrade: tifffile>=2019.7.26 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (2022.5.4)
Requirement already satisfied, skipping upgrade: PyWavelets>=1.1.1 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (1.3.0)
Requirement already satisfied, skipping upgrade: packaging>=20.0 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (21.3)
Requirement already satisfied, skipping upgrade: pillow!=7.1.0,!=7.1.1,!=8.3.0,>=6.1.0 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (9.0.0)
Requirement already satisfied, skipping upgrade: scipy>=1.4.1 in /home/julianmelero/.local/lib/python3.8/site-packages (from scikit-image->efficientnet==1.0.0->segmentation-models) (1.7.3)
Requirement already satisfied, skipping upgrade: pyparsing!=3.0.5,>=2.0.2 in /home/julianmelero/.local/lib/python3.8/site-packages (from packaging>=20.0->scikit-image->efficientnet==1.0.0->segmentation-models) (3.0.6)
Installing collected packages: keras-applications, efficientnet, image-classifiers, segmentation-models
Successfully installed efficientnet-1.0.0 image-classifiers-1.0.0 keras-applications-1.0.8 segmentation-models-1.0.1
import segmentation_models as sm
import tensorflow as tf
sm.set_framework('tf.keras')
BACKBONE = 'mobilenetv2'
preprocess_input = sm.get_preprocessing(BACKBONE)
N_ROWS = 160
N_COLS = 160
N_LABELS = 1
def read_image_transf(image_path, mask_path):
# Leemos la imagen del directorio
image = tf.io.read_file(image_path)
# Decodificamos la imagen en función del tipo de codificación (PNG) en nuestro caso
image = tf.image.decode_png(image, channels=3)
# Convertimos la imagen a tipo de datos en coma flotante para evitar que la normalización nos de error
image = tf.cast(image, tf.float32)
# Normalizamos la imagen con la capa de referencia de MobilenetV2
image = preprocess_input(image)
# Redimensionamos la imagen al tamaño deseado
image = tf.image.resize(image, (N_ROWS, N_COLS), method='nearest')
# Leemos la máscara del directorio
mask = tf.io.read_file(mask_path)
# Decodificamos la imagen en función del tipo de codificación (PNG) en nuestro caso
mask = tf.image.decode_png(mask, channels=3)
# Las máscaras están guardadas de manera que el número de la etiqueta (0-12) se encuentra en el primer canal
# Por lo tanto, nos quedamos con el valor máximo de cada canal para crear una matriz de tamaño [FILAS, COLUMNAS, 1]
mask = tf.math.reduce_max(mask, axis=-1, keepdims=True)
# Redimensionamos la imagen al tamaño deseado
mask = tf.image.resize(mask, (N_ROWS, N_COLS), method='nearest')
# Eliminamos el último canal para quedarnos con matriz [FILAS, COLUMNAS]
mask = tf.squeeze(mask, axis=-1)
# Codificamos matriz a OneHot
mask = tf.one_hot(tf.cast(mask, tf.int32), N_LABELS)
return image, mask
def dataset_generator_transf(image_paths, mask_paths, buffer_size, batch_size):
image_list = tf.constant(image_paths)
mask_list = tf.constant(mask_paths)
dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
# A cada elemento del dataset (directorios imagenes y máscaras) les aplicamos el método read_image_transf
dataset = dataset.map(read_image_transf, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.cache().shuffle(buffer_size).batch(batch_size)
return dataset
train_dataset = dataset_generator_transf(train_image_paths, train_mask_paths, BUFFER_SIZE, BATCH_SIZE)
validation_dataset = dataset_generator_transf(val_image_paths, val_mask_paths, BUFFER_SIZE, BATCH_SIZE)
test_dataset = dataset_generator_transf(test_image_paths, test_mask_paths, BUFFER_SIZE, BATCH_SIZE)
model_tl_unet = sm.Unet(BACKBONE, input_shape=(N_COLS, N_ROWS, 3), classes=N_LABELS, activation='sigmoid', encoder_weights='imagenet', encoder_freeze=True)
model_tl_unet.summary()
Model: "model_4"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_5 (InputLayer) [(None, 160, 160, 3) 0
__________________________________________________________________________________________________
Conv1_pad (ZeroPadding2D) (None, 161, 161, 3) 0 input_5[0][0]
__________________________________________________________________________________________________
Conv1 (Conv2D) (None, 80, 80, 32) 864 Conv1_pad[0][0]
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization) (None, 80, 80, 32) 128 Conv1[0][0]
__________________________________________________________________________________________________
Conv1_relu (ReLU) (None, 80, 80, 32) 0 bn_Conv1[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 80, 80, 32) 288 Conv1_relu[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 80, 80, 32) 128 expanded_conv_depthwise[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 80, 80, 32) 0 expanded_conv_depthwise_BN[0][0]
__________________________________________________________________________________________________
expanded_conv_project (Conv2D) (None, 80, 80, 16) 512 expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 80, 80, 16) 64 expanded_conv_project[0][0]
__________________________________________________________________________________________________
block_1_expand (Conv2D) (None, 80, 80, 96) 1536 expanded_conv_project_BN[0][0]
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 80, 80, 96) 384 block_1_expand[0][0]
__________________________________________________________________________________________________
block_1_expand_relu (ReLU) (None, 80, 80, 96) 0 block_1_expand_BN[0][0]
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D) (None, 81, 81, 96) 0 block_1_expand_relu[0][0]
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 40, 40, 96) 864 block_1_pad[0][0]
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 40, 40, 96) 384 block_1_depthwise[0][0]
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU) (None, 40, 40, 96) 0 block_1_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_1_project (Conv2D) (None, 40, 40, 24) 2304 block_1_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 40, 40, 24) 96 block_1_project[0][0]
__________________________________________________________________________________________________
block_2_expand (Conv2D) (None, 40, 40, 144) 3456 block_1_project_BN[0][0]
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_2_expand[0][0]
__________________________________________________________________________________________________
block_2_expand_relu (ReLU) (None, 40, 40, 144) 0 block_2_expand_BN[0][0]
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 40, 40, 144) 1296 block_2_expand_relu[0][0]
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 40, 40, 144) 576 block_2_depthwise[0][0]
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU) (None, 40, 40, 144) 0 block_2_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_2_project (Conv2D) (None, 40, 40, 24) 3456 block_2_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 40, 40, 24) 96 block_2_project[0][0]
__________________________________________________________________________________________________
block_2_add (Add) (None, 40, 40, 24) 0 block_1_project_BN[0][0]
block_2_project_BN[0][0]
__________________________________________________________________________________________________
block_3_expand (Conv2D) (None, 40, 40, 144) 3456 block_2_add[0][0]
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_3_expand[0][0]
__________________________________________________________________________________________________
block_3_expand_relu (ReLU) (None, 40, 40, 144) 0 block_3_expand_BN[0][0]
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D) (None, 41, 41, 144) 0 block_3_expand_relu[0][0]
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 20, 20, 144) 1296 block_3_pad[0][0]
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 20, 20, 144) 576 block_3_depthwise[0][0]
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU) (None, 20, 20, 144) 0 block_3_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_3_project (Conv2D) (None, 20, 20, 32) 4608 block_3_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 20, 20, 32) 128 block_3_project[0][0]
__________________________________________________________________________________________________
block_4_expand (Conv2D) (None, 20, 20, 192) 6144 block_3_project_BN[0][0]
__________________________________________________________________________________________________
block_4_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_4_expand[0][0]
__________________________________________________________________________________________________
block_4_expand_relu (ReLU) (None, 20, 20, 192) 0 block_4_expand_BN[0][0]
__________________________________________________________________________________________________
block_4_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_4_expand_relu[0][0]
__________________________________________________________________________________________________
block_4_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_4_depthwise[0][0]
__________________________________________________________________________________________________
block_4_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_4_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_4_project (Conv2D) (None, 20, 20, 32) 6144 block_4_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_4_project_BN (BatchNormal (None, 20, 20, 32) 128 block_4_project[0][0]
__________________________________________________________________________________________________
block_4_add (Add) (None, 20, 20, 32) 0 block_3_project_BN[0][0]
block_4_project_BN[0][0]
__________________________________________________________________________________________________
block_5_expand (Conv2D) (None, 20, 20, 192) 6144 block_4_add[0][0]
__________________________________________________________________________________________________
block_5_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_5_expand[0][0]
__________________________________________________________________________________________________
block_5_expand_relu (ReLU) (None, 20, 20, 192) 0 block_5_expand_BN[0][0]
__________________________________________________________________________________________________
block_5_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_5_expand_relu[0][0]
__________________________________________________________________________________________________
block_5_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_5_depthwise[0][0]
__________________________________________________________________________________________________
block_5_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_5_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_5_project (Conv2D) (None, 20, 20, 32) 6144 block_5_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_5_project_BN (BatchNormal (None, 20, 20, 32) 128 block_5_project[0][0]
__________________________________________________________________________________________________
block_5_add (Add) (None, 20, 20, 32) 0 block_4_add[0][0]
block_5_project_BN[0][0]
__________________________________________________________________________________________________
block_6_expand (Conv2D) (None, 20, 20, 192) 6144 block_5_add[0][0]
__________________________________________________________________________________________________
block_6_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_6_expand[0][0]
__________________________________________________________________________________________________
block_6_expand_relu (ReLU) (None, 20, 20, 192) 0 block_6_expand_BN[0][0]
__________________________________________________________________________________________________
block_6_pad (ZeroPadding2D) (None, 21, 21, 192) 0 block_6_expand_relu[0][0]
__________________________________________________________________________________________________
block_6_depthwise (DepthwiseCon (None, 10, 10, 192) 1728 block_6_pad[0][0]
__________________________________________________________________________________________________
block_6_depthwise_BN (BatchNorm (None, 10, 10, 192) 768 block_6_depthwise[0][0]
__________________________________________________________________________________________________
block_6_depthwise_relu (ReLU) (None, 10, 10, 192) 0 block_6_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_6_project (Conv2D) (None, 10, 10, 64) 12288 block_6_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_6_project_BN (BatchNormal (None, 10, 10, 64) 256 block_6_project[0][0]
__________________________________________________________________________________________________
block_7_expand (Conv2D) (None, 10, 10, 384) 24576 block_6_project_BN[0][0]
__________________________________________________________________________________________________
block_7_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_7_expand[0][0]
__________________________________________________________________________________________________
block_7_expand_relu (ReLU) (None, 10, 10, 384) 0 block_7_expand_BN[0][0]
__________________________________________________________________________________________________
block_7_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_7_expand_relu[0][0]
__________________________________________________________________________________________________
block_7_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_7_depthwise[0][0]
__________________________________________________________________________________________________
block_7_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_7_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_7_project (Conv2D) (None, 10, 10, 64) 24576 block_7_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_7_project_BN (BatchNormal (None, 10, 10, 64) 256 block_7_project[0][0]
__________________________________________________________________________________________________
block_7_add (Add) (None, 10, 10, 64) 0 block_6_project_BN[0][0]
block_7_project_BN[0][0]
__________________________________________________________________________________________________
block_8_expand (Conv2D) (None, 10, 10, 384) 24576 block_7_add[0][0]
__________________________________________________________________________________________________
block_8_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_8_expand[0][0]
__________________________________________________________________________________________________
block_8_expand_relu (ReLU) (None, 10, 10, 384) 0 block_8_expand_BN[0][0]
__________________________________________________________________________________________________
block_8_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_8_expand_relu[0][0]
__________________________________________________________________________________________________
block_8_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_8_depthwise[0][0]
__________________________________________________________________________________________________
block_8_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_8_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_8_project (Conv2D) (None, 10, 10, 64) 24576 block_8_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_8_project_BN (BatchNormal (None, 10, 10, 64) 256 block_8_project[0][0]
__________________________________________________________________________________________________
block_8_add (Add) (None, 10, 10, 64) 0 block_7_add[0][0]
block_8_project_BN[0][0]
__________________________________________________________________________________________________
block_9_expand (Conv2D) (None, 10, 10, 384) 24576 block_8_add[0][0]
__________________________________________________________________________________________________
block_9_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_9_expand[0][0]
__________________________________________________________________________________________________
block_9_expand_relu (ReLU) (None, 10, 10, 384) 0 block_9_expand_BN[0][0]
__________________________________________________________________________________________________
block_9_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_9_expand_relu[0][0]
__________________________________________________________________________________________________
block_9_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_9_depthwise[0][0]
__________________________________________________________________________________________________
block_9_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_9_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_9_project (Conv2D) (None, 10, 10, 64) 24576 block_9_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_9_project_BN (BatchNormal (None, 10, 10, 64) 256 block_9_project[0][0]
__________________________________________________________________________________________________
block_9_add (Add) (None, 10, 10, 64) 0 block_8_add[0][0]
block_9_project_BN[0][0]
__________________________________________________________________________________________________
block_10_expand (Conv2D) (None, 10, 10, 384) 24576 block_9_add[0][0]
__________________________________________________________________________________________________
block_10_expand_BN (BatchNormal (None, 10, 10, 384) 1536 block_10_expand[0][0]
__________________________________________________________________________________________________
block_10_expand_relu (ReLU) (None, 10, 10, 384) 0 block_10_expand_BN[0][0]
__________________________________________________________________________________________________
block_10_depthwise (DepthwiseCo (None, 10, 10, 384) 3456 block_10_expand_relu[0][0]
__________________________________________________________________________________________________
block_10_depthwise_BN (BatchNor (None, 10, 10, 384) 1536 block_10_depthwise[0][0]
__________________________________________________________________________________________________
block_10_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_10_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_10_project (Conv2D) (None, 10, 10, 96) 36864 block_10_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_10_project_BN (BatchNorma (None, 10, 10, 96) 384 block_10_project[0][0]
__________________________________________________________________________________________________
block_11_expand (Conv2D) (None, 10, 10, 576) 55296 block_10_project_BN[0][0]
__________________________________________________________________________________________________
block_11_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_11_expand[0][0]
__________________________________________________________________________________________________
block_11_expand_relu (ReLU) (None, 10, 10, 576) 0 block_11_expand_BN[0][0]
__________________________________________________________________________________________________
block_11_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_11_expand_relu[0][0]
__________________________________________________________________________________________________
block_11_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_11_depthwise[0][0]
__________________________________________________________________________________________________
block_11_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_11_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_11_project (Conv2D) (None, 10, 10, 96) 55296 block_11_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_11_project_BN (BatchNorma (None, 10, 10, 96) 384 block_11_project[0][0]
__________________________________________________________________________________________________
block_11_add (Add) (None, 10, 10, 96) 0 block_10_project_BN[0][0]
block_11_project_BN[0][0]
__________________________________________________________________________________________________
block_12_expand (Conv2D) (None, 10, 10, 576) 55296 block_11_add[0][0]
__________________________________________________________________________________________________
block_12_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_12_expand[0][0]
__________________________________________________________________________________________________
block_12_expand_relu (ReLU) (None, 10, 10, 576) 0 block_12_expand_BN[0][0]
__________________________________________________________________________________________________
block_12_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_12_expand_relu[0][0]
__________________________________________________________________________________________________
block_12_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_12_depthwise[0][0]
__________________________________________________________________________________________________
block_12_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_12_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_12_project (Conv2D) (None, 10, 10, 96) 55296 block_12_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_12_project_BN (BatchNorma (None, 10, 10, 96) 384 block_12_project[0][0]
__________________________________________________________________________________________________
block_12_add (Add) (None, 10, 10, 96) 0 block_11_add[0][0]
block_12_project_BN[0][0]
__________________________________________________________________________________________________
block_13_expand (Conv2D) (None, 10, 10, 576) 55296 block_12_add[0][0]
__________________________________________________________________________________________________
block_13_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_13_expand[0][0]
__________________________________________________________________________________________________
block_13_expand_relu (ReLU) (None, 10, 10, 576) 0 block_13_expand_BN[0][0]
__________________________________________________________________________________________________
block_13_pad (ZeroPadding2D) (None, 11, 11, 576) 0 block_13_expand_relu[0][0]
__________________________________________________________________________________________________
block_13_depthwise (DepthwiseCo (None, 5, 5, 576) 5184 block_13_pad[0][0]
__________________________________________________________________________________________________
block_13_depthwise_BN (BatchNor (None, 5, 5, 576) 2304 block_13_depthwise[0][0]
__________________________________________________________________________________________________
block_13_depthwise_relu (ReLU) (None, 5, 5, 576) 0 block_13_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_13_project (Conv2D) (None, 5, 5, 160) 92160 block_13_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_13_project_BN (BatchNorma (None, 5, 5, 160) 640 block_13_project[0][0]
__________________________________________________________________________________________________
block_14_expand (Conv2D) (None, 5, 5, 960) 153600 block_13_project_BN[0][0]
__________________________________________________________________________________________________
block_14_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_14_expand[0][0]
__________________________________________________________________________________________________
block_14_expand_relu (ReLU) (None, 5, 5, 960) 0 block_14_expand_BN[0][0]
__________________________________________________________________________________________________
block_14_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_14_expand_relu[0][0]
__________________________________________________________________________________________________
block_14_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_14_depthwise[0][0]
__________________________________________________________________________________________________
block_14_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_14_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_14_project (Conv2D) (None, 5, 5, 160) 153600 block_14_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_14_project_BN (BatchNorma (None, 5, 5, 160) 640 block_14_project[0][0]
__________________________________________________________________________________________________
block_14_add (Add) (None, 5, 5, 160) 0 block_13_project_BN[0][0]
block_14_project_BN[0][0]
__________________________________________________________________________________________________
block_15_expand (Conv2D) (None, 5, 5, 960) 153600 block_14_add[0][0]
__________________________________________________________________________________________________
block_15_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_15_expand[0][0]
__________________________________________________________________________________________________
block_15_expand_relu (ReLU) (None, 5, 5, 960) 0 block_15_expand_BN[0][0]
__________________________________________________________________________________________________
block_15_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_15_expand_relu[0][0]
__________________________________________________________________________________________________
block_15_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_15_depthwise[0][0]
__________________________________________________________________________________________________
block_15_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_15_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_15_project (Conv2D) (None, 5, 5, 160) 153600 block_15_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_15_project_BN (BatchNorma (None, 5, 5, 160) 640 block_15_project[0][0]
__________________________________________________________________________________________________
block_15_add (Add) (None, 5, 5, 160) 0 block_14_add[0][0]
block_15_project_BN[0][0]
__________________________________________________________________________________________________
block_16_expand (Conv2D) (None, 5, 5, 960) 153600 block_15_add[0][0]
__________________________________________________________________________________________________
block_16_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_16_expand[0][0]
__________________________________________________________________________________________________
block_16_expand_relu (ReLU) (None, 5, 5, 960) 0 block_16_expand_BN[0][0]
__________________________________________________________________________________________________
block_16_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_16_expand_relu[0][0]
__________________________________________________________________________________________________
block_16_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_16_depthwise[0][0]
__________________________________________________________________________________________________
block_16_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_16_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_16_project (Conv2D) (None, 5, 5, 320) 307200 block_16_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_16_project_BN (BatchNorma (None, 5, 5, 320) 1280 block_16_project[0][0]
__________________________________________________________________________________________________
Conv_1 (Conv2D) (None, 5, 5, 1280) 409600 block_16_project_BN[0][0]
__________________________________________________________________________________________________
Conv_1_bn (BatchNormalization) (None, 5, 5, 1280) 5120 Conv_1[0][0]
__________________________________________________________________________________________________
out_relu (ReLU) (None, 5, 5, 1280) 0 Conv_1_bn[0][0]
__________________________________________________________________________________________________
decoder_stage0_upsampling (UpSa (None, 10, 10, 1280) 0 out_relu[0][0]
__________________________________________________________________________________________________
decoder_stage0_concat (Concaten (None, 10, 10, 1856) 0 decoder_stage0_upsampling[0][0]
block_13_expand_relu[0][0]
__________________________________________________________________________________________________
decoder_stage0a_conv (Conv2D) (None, 10, 10, 256) 4276224 decoder_stage0_concat[0][0]
__________________________________________________________________________________________________
decoder_stage0a_bn (BatchNormal (None, 10, 10, 256) 1024 decoder_stage0a_conv[0][0]
__________________________________________________________________________________________________
decoder_stage0a_relu (Activatio (None, 10, 10, 256) 0 decoder_stage0a_bn[0][0]
__________________________________________________________________________________________________
decoder_stage0b_conv (Conv2D) (None, 10, 10, 256) 589824 decoder_stage0a_relu[0][0]
__________________________________________________________________________________________________
decoder_stage0b_bn (BatchNormal (None, 10, 10, 256) 1024 decoder_stage0b_conv[0][0]
__________________________________________________________________________________________________
decoder_stage0b_relu (Activatio (None, 10, 10, 256) 0 decoder_stage0b_bn[0][0]
__________________________________________________________________________________________________
decoder_stage1_upsampling (UpSa (None, 20, 20, 256) 0 decoder_stage0b_relu[0][0]
__________________________________________________________________________________________________
decoder_stage1_concat (Concaten (None, 20, 20, 448) 0 decoder_stage1_upsampling[0][0]
block_6_expand_relu[0][0]
__________________________________________________________________________________________________
decoder_stage1a_conv (Conv2D) (None, 20, 20, 128) 516096 decoder_stage1_concat[0][0]
__________________________________________________________________________________________________
decoder_stage1a_bn (BatchNormal (None, 20, 20, 128) 512 decoder_stage1a_conv[0][0]
__________________________________________________________________________________________________
decoder_stage1a_relu (Activatio (None, 20, 20, 128) 0 decoder_stage1a_bn[0][0]
__________________________________________________________________________________________________
decoder_stage1b_conv (Conv2D) (None, 20, 20, 128) 147456 decoder_stage1a_relu[0][0]
__________________________________________________________________________________________________
decoder_stage1b_bn (BatchNormal (None, 20, 20, 128) 512 decoder_stage1b_conv[0][0]
__________________________________________________________________________________________________
decoder_stage1b_relu (Activatio (None, 20, 20, 128) 0 decoder_stage1b_bn[0][0]
__________________________________________________________________________________________________
decoder_stage2_upsampling (UpSa (None, 40, 40, 128) 0 decoder_stage1b_relu[0][0]
__________________________________________________________________________________________________
decoder_stage2_concat (Concaten (None, 40, 40, 272) 0 decoder_stage2_upsampling[0][0]
block_3_expand_relu[0][0]
__________________________________________________________________________________________________
decoder_stage2a_conv (Conv2D) (None, 40, 40, 64) 156672 decoder_stage2_concat[0][0]
__________________________________________________________________________________________________
decoder_stage2a_bn (BatchNormal (None, 40, 40, 64) 256 decoder_stage2a_conv[0][0]
__________________________________________________________________________________________________
decoder_stage2a_relu (Activatio (None, 40, 40, 64) 0 decoder_stage2a_bn[0][0]
__________________________________________________________________________________________________
decoder_stage2b_conv (Conv2D) (None, 40, 40, 64) 36864 decoder_stage2a_relu[0][0]
__________________________________________________________________________________________________
decoder_stage2b_bn (BatchNormal (None, 40, 40, 64) 256 decoder_stage2b_conv[0][0]
__________________________________________________________________________________________________
decoder_stage2b_relu (Activatio (None, 40, 40, 64) 0 decoder_stage2b_bn[0][0]
__________________________________________________________________________________________________
decoder_stage3_upsampling (UpSa (None, 80, 80, 64) 0 decoder_stage2b_relu[0][0]
__________________________________________________________________________________________________
decoder_stage3_concat (Concaten (None, 80, 80, 160) 0 decoder_stage3_upsampling[0][0]
block_1_expand_relu[0][0]
__________________________________________________________________________________________________
decoder_stage3a_conv (Conv2D) (None, 80, 80, 32) 46080 decoder_stage3_concat[0][0]
__________________________________________________________________________________________________
decoder_stage3a_bn (BatchNormal (None, 80, 80, 32) 128 decoder_stage3a_conv[0][0]
__________________________________________________________________________________________________
decoder_stage3a_relu (Activatio (None, 80, 80, 32) 0 decoder_stage3a_bn[0][0]
__________________________________________________________________________________________________
decoder_stage3b_conv (Conv2D) (None, 80, 80, 32) 9216 decoder_stage3a_relu[0][0]
__________________________________________________________________________________________________
decoder_stage3b_bn (BatchNormal (None, 80, 80, 32) 128 decoder_stage3b_conv[0][0]
__________________________________________________________________________________________________
decoder_stage3b_relu (Activatio (None, 80, 80, 32) 0 decoder_stage3b_bn[0][0]
__________________________________________________________________________________________________
decoder_stage4_upsampling (UpSa (None, 160, 160, 32) 0 decoder_stage3b_relu[0][0]
__________________________________________________________________________________________________
decoder_stage4a_conv (Conv2D) (None, 160, 160, 16) 4608 decoder_stage4_upsampling[0][0]
__________________________________________________________________________________________________
decoder_stage4a_bn (BatchNormal (None, 160, 160, 16) 64 decoder_stage4a_conv[0][0]
__________________________________________________________________________________________________
decoder_stage4a_relu (Activatio (None, 160, 160, 16) 0 decoder_stage4a_bn[0][0]
__________________________________________________________________________________________________
decoder_stage4b_conv (Conv2D) (None, 160, 160, 16) 2304 decoder_stage4a_relu[0][0]
__________________________________________________________________________________________________
decoder_stage4b_bn (BatchNormal (None, 160, 160, 16) 64 decoder_stage4b_conv[0][0]
__________________________________________________________________________________________________
decoder_stage4b_relu (Activatio (None, 160, 160, 16) 0 decoder_stage4b_bn[0][0]
__________________________________________________________________________________________________
final_conv (Conv2D) (None, 160, 160, 1) 145 decoder_stage4b_relu[0][0]
__________________________________________________________________________________________________
sigmoid (Activation) (None, 160, 160, 1) 0 final_conv[0][0]
==================================================================================================
Total params: 8,047,441
Trainable params: 5,821,585
Non-trainable params: 2,225,856
__________________________________________________________________________________________________
EPOCHS = 12
path_experiment = "lung_segmentation/segmentacion_binaria/models/Train2"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=path_experiment + 'model_unet_transf.h5',
monitor='val_loss',
mode='min',
save_best_only=True,
verbose=1)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=5,
verbose=1,
mode='min',
restore_best_weights=False
)
model_tl_unet.compile(optimizer='adam',
loss=sm.losses.DiceLoss(),
metrics=['accuracy', dice_coef])
history_unet_transf = model_tl_unet.fit(train_dataset,
epochs=EPOCHS,
validation_data=validation_dataset,
callbacks=[model_checkpoint_callback, early_stopping],
verbose=1)
Epoch 1/12
2022-05-21 13:59:24.875845: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
2022-05-21 13:59:24.890019: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2591995000 Hz
2022-05-21 13:59:25.971768: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8
2022-05-21 13:59:28.723523: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11
2022-05-21 13:59:29.448812: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11
2022-05-21 13:59:30.509118: W tensorflow/core/common_runtime/bfc_allocator.cc:248] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.21GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2022-05-21 13:59:30.509186: W tensorflow/core/common_runtime/bfc_allocator.cc:248] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.21GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2022-05-21 13:59:34.644372: W tensorflow/core/common_runtime/bfc_allocator.cc:248] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.21GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
2022-05-21 13:59:34.644438: W tensorflow/core/common_runtime/bfc_allocator.cc:248] Allocator (GPU_0_bfc) ran out of memory trying to allocate 2.21GiB with freed_by_count=0. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.
63/63 [==============================] - 48s 573ms/step - loss: 0.2331 - accuracy: 0.8363 - dice_coef: 0.7669 - val_loss: 0.2617 - val_accuracy: 0.7891 - val_dice_coef: 0.7383
Epoch 00001: val_loss improved from inf to 0.26172, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 2/12
63/63 [==============================] - 20s 314ms/step - loss: 0.0420 - accuracy: 0.9800 - dice_coef: 0.9580 - val_loss: 0.0677 - val_accuracy: 0.9607 - val_dice_coef: 0.9323
Epoch 00002: val_loss improved from 0.26172 to 0.06769, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 3/12
63/63 [==============================] - 20s 314ms/step - loss: 0.0208 - accuracy: 0.9844 - dice_coef: 0.9792 - val_loss: 0.0337 - val_accuracy: 0.9679 - val_dice_coef: 0.9663
Epoch 00003: val_loss improved from 0.06769 to 0.03372, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 4/12
63/63 [==============================] - 20s 314ms/step - loss: 0.0151 - accuracy: 0.9860 - dice_coef: 0.9849 - val_loss: 0.0310 - val_accuracy: 0.9619 - val_dice_coef: 0.9690
Epoch 00004: val_loss improved from 0.03372 to 0.03102, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 5/12
63/63 [==============================] - 20s 314ms/step - loss: 0.0117 - accuracy: 0.9881 - dice_coef: 0.9883 - val_loss: 0.0335 - val_accuracy: 0.9541 - val_dice_coef: 0.9665
Epoch 00005: val_loss did not improve from 0.03102
Epoch 6/12
63/63 [==============================] - 20s 318ms/step - loss: 0.0105 - accuracy: 0.9883 - dice_coef: 0.9895 - val_loss: 0.0312 - val_accuracy: 0.9565 - val_dice_coef: 0.9688
Epoch 00006: val_loss did not improve from 0.03102
Epoch 7/12
63/63 [==============================] - 20s 315ms/step - loss: 0.0091 - accuracy: 0.9895 - dice_coef: 0.9909 - val_loss: 0.0280 - val_accuracy: 0.9601 - val_dice_coef: 0.9720
Epoch 00007: val_loss improved from 0.03102 to 0.02802, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 8/12
63/63 [==============================] - 20s 315ms/step - loss: 0.0084 - accuracy: 0.9899 - dice_coef: 0.9916 - val_loss: 0.0208 - val_accuracy: 0.9707 - val_dice_coef: 0.9792
Epoch 00008: val_loss improved from 0.02802 to 0.02081, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 9/12
63/63 [==============================] - 20s 316ms/step - loss: 0.0078 - accuracy: 0.9904 - dice_coef: 0.9922 - val_loss: 0.0220 - val_accuracy: 0.9685 - val_dice_coef: 0.9780
Epoch 00009: val_loss did not improve from 0.02081
Epoch 10/12
63/63 [==============================] - 20s 315ms/step - loss: 0.0071 - accuracy: 0.9912 - dice_coef: 0.9929 - val_loss: 0.0145 - val_accuracy: 0.9798 - val_dice_coef: 0.9855
Epoch 00010: val_loss improved from 0.02081 to 0.01453, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
Epoch 11/12
63/63 [==============================] - 20s 315ms/step - loss: 0.0066 - accuracy: 0.9917 - dice_coef: 0.9934 - val_loss: 0.0154 - val_accuracy: 0.9785 - val_dice_coef: 0.9846
Epoch 00011: val_loss did not improve from 0.01453
Epoch 12/12
63/63 [==============================] - 20s 320ms/step - loss: 0.0066 - accuracy: 0.9915 - dice_coef: 0.9934 - val_loss: 0.0143 - val_accuracy: 0.9800 - val_dice_coef: 0.9857
Epoch 00012: val_loss improved from 0.01453 to 0.01429, saving model to lung_segmentation/segmentacion_binaria/models/Train2model_unet_transf.h5
# Representamos la exactitud, coeficiente DICE y las pérdidas en entrenamiento y validación
def show_learning_curves_transf(history):
plt.figure(figsize=(20,5))
# Exactitud
plt.subplot(1, 3, 1)
plt.plot(history.history['accuracy'], label='Entrenamiento')
plt.plot(history.history['val_accuracy'], label='Validación')
plt.xlabel('Época')
plt.ylabel('Exactitud')
plt.title('Exactitud')
plt.ylim([0, 1])
plt.legend()
# Coeficiente DICE
plt.subplot(1, 3, 2)
plt.plot(history.history['dice_coef'], label='Entrenamiento')
plt.plot(history.history['val_dice_coef'], label='Validación')
plt.xlabel('Época')
plt.ylabel('Coeficiente Dice')
plt.title('Coeficiente Dice')
plt.ylim([0, 1])
plt.legend()
# Pérdidas
plt.subplot(1, 3, 3)
plt.plot(history.history['loss'], label='Entrenamiento')
plt.plot(history.history['val_loss'], label='Validación')
plt.xlabel('Época')
plt.ylabel('Pérdidas')
plt.title('Pérdidas')
plt.ylim([0, 1])
plt.legend()
show_learning_curves_transf(history_unet_transf)
show_prediction(model_tl_unet)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
model_tl_unet.save("model_unet_transf.h5")
model_tl_unet_sin_dice = sm.Unet(BACKBONE, input_shape=(N_COLS, N_ROWS, 3), classes=N_LABELS, activation='sigmoid', encoder_weights='imagenet', encoder_freeze=True)
model_tl_unet_sin_dice.compile(optimizer='adam',
loss="binary_crossentropy",
metrics=['accuracy'])
history_unet_transf_sin_dice = model_tl_unet_sin_dice.fit(train_dataset,
epochs=EPOCHS,
validation_data=validation_dataset,
callbacks=[model_checkpoint_callback, early_stopping],
verbose=1)
Epoch 1/12
63/63 [==============================] - 25s 342ms/step - loss: 0.2785 - accuracy: 0.9053 - val_loss: 1.0596 - val_accuracy: 0.8151
Epoch 00001: val_loss did not improve from 0.01429
Epoch 2/12
63/63 [==============================] - 21s 326ms/step - loss: 0.0546 - accuracy: 0.9821 - val_loss: 0.0998 - val_accuracy: 0.9696
Epoch 00002: val_loss did not improve from 0.01429
Epoch 3/12
63/63 [==============================] - 20s 322ms/step - loss: 0.0388 - accuracy: 0.9855 - val_loss: 0.0982 - val_accuracy: 0.9674
Epoch 00003: val_loss did not improve from 0.01429
Epoch 4/12
63/63 [==============================] - 20s 321ms/step - loss: 0.0317 - accuracy: 0.9877 - val_loss: 0.1341 - val_accuracy: 0.9580
Epoch 00004: val_loss did not improve from 0.01429
Epoch 5/12
63/63 [==============================] - 20s 317ms/step - loss: 0.0311 - accuracy: 0.9875 - val_loss: 0.1073 - val_accuracy: 0.9646
Epoch 00005: val_loss did not improve from 0.01429
Epoch 6/12
63/63 [==============================] - 19s 309ms/step - loss: 0.0263 - accuracy: 0.9893 - val_loss: 0.1039 - val_accuracy: 0.9660
Epoch 00006: val_loss did not improve from 0.01429
Epoch 7/12
63/63 [==============================] - 20s 320ms/step - loss: 0.0237 - accuracy: 0.9903 - val_loss: 0.1105 - val_accuracy: 0.9650
Epoch 00007: val_loss did not improve from 0.01429
Epoch 8/12
63/63 [==============================] - 20s 319ms/step - loss: 0.0215 - accuracy: 0.9911 - val_loss: 0.0744 - val_accuracy: 0.9745
Epoch 00008: val_loss did not improve from 0.01429
Epoch 9/12
63/63 [==============================] - 20s 321ms/step - loss: 0.0209 - accuracy: 0.9913 - val_loss: 0.0952 - val_accuracy: 0.9704
Epoch 00009: val_loss did not improve from 0.01429
Epoch 10/12
63/63 [==============================] - 20s 320ms/step - loss: 0.0190 - accuracy: 0.9920 - val_loss: 0.0764 - val_accuracy: 0.9749
Epoch 00010: val_loss did not improve from 0.01429
Epoch 11/12
63/63 [==============================] - 20s 320ms/step - loss: 0.0187 - accuracy: 0.9921 - val_loss: 0.0661 - val_accuracy: 0.9778
Epoch 00011: val_loss did not improve from 0.01429
Epoch 12/12
63/63 [==============================] - 20s 317ms/step - loss: 0.0181 - accuracy: 0.9924 - val_loss: 0.0631 - val_accuracy: 0.9793
Epoch 00012: val_loss did not improve from 0.01429
model_tl_unet_sin_dice.save("model_unet_transf_sin_dice.h5")
show_prediction(model_tl_unet_sin_dice)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).