In [ ]:
!pip install patchify
Requirement already satisfied: patchify in /usr/local/lib/python3.10/dist-packages (0.2.3)
Requirement already satisfied: numpy<2,>=1 in /usr/local/lib/python3.10/dist-packages (from patchify) (1.26.4)
In [ ]:
import os
import glob
import numpy as np
import pandas as pd
from datetime import datetime
import cv2
from PIL import Image
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import models, layers, regularizers
from tensorflow.keras import backend as K
from keras.utils import normalize
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
In [ ]:
# For this example, I used a patch size of 128; a smaller patch size yields
# more (and smaller) patches. With step equal to the patch size, the patches do not overlap.
patchsize = 128
step = 128
bandNum = 8
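As a quick sanity check, the expected patch count can be computed from the image size before calling patchify. This is a minimal sketch; `img_dim` is assumed to be 2048 to match the tif read later in the notebook.

In [ ]:
# With a non-overlapping sliding window (step == patchsize), the number of
# patches per axis is floor((dim - patchsize) / step) + 1.
img_dim = 2048  # assumed image height/width (matches the image.tif read below)
patches_per_axis = (img_dim - patchsize) // step + 1
print(patches_per_axis, 'x', patches_per_axis, '=', patches_per_axis**2, 'patches')  # 16 x 16 = 256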
In [ ]:
def multi_unet_model(n_classes=2, IMG_HEIGHT=patchsize, IMG_WIDTH=patchsize, IMG_CHANNELS=bandNum):
    # Build the model
    inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    dropoutRate = 0.1
    s = Lambda(lambda x: x / 255)(inputs)  # No need for this if we normalize our inputs beforehand
    #s = inputs
    print(s.shape)

    # Contraction path
    c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='RandomNormal', padding='same')(s)
    c1 = Dropout(dropoutRate)(c1)
    c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='RandomNormal', padding='same')(c1)
    print('c1 size', c1.shape)
    p1 = MaxPooling2D((2, 2))(c1)
    print('p1 size', p1.shape)

    c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
    c2 = Dropout(dropoutRate)(c2)
    c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
    print('c2 size', c2.shape)
    p2 = MaxPooling2D((2, 2))(c2)
    print('p2 size', p2.shape)

    c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
    c3 = Dropout(dropoutRate)(c3)
    c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
    print('c3 size', c3.shape)
    p3 = MaxPooling2D((2, 2))(c3)
    print('p3 size', p3.shape)

    c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
    c4 = Dropout(dropoutRate)(c4)
    c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)
    print('c4 size', c4.shape)
    print('p4 size', p4.shape)

    c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
    c5 = Dropout(dropoutRate)(c5)
    c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
    print('c5 size', c5.shape)

    # Expansive path
    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
    c6 = Dropout(dropoutRate)(c6)
    c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
    print('c6 size', c6.shape)
    print('u6 size', u6.shape)

    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
    c7 = Dropout(dropoutRate)(c7)
    c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
    print('c7 size', c7.shape)
    print('u7 size', u7.shape)

    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
    c8 = Dropout(dropoutRate)(c8)
    c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
    print('c8 size', c8.shape)
    print('u8 size', u8.shape)

    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
    c9 = Dropout(dropoutRate)(c9)
    c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
    print('c9 size', c9.shape)
    print('u9 size', u9.shape)

    outputs = Conv2D(n_classes, (1, 1), activation='softmax')(c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    #NOTE: Compile the model in the main program to make it easy to test with various loss functions
    #model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    #model.summary()
    return model
In [ ]:
# Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
In [ ]:
# Change '/Course/1131/Data Mining/UNET_Example' to the corresponding folder in your Google Drive
imagefile = '/content/drive/My Drive/Course/1131/Data Mining/UNET_Example/image.tif'
masksfile = '/content/drive/My Drive/Course/1131/Data Mining/UNET_Example/mask.tif'
In [ ]:
import os
from glob import glob
from PIL import Image # For working with .tif images
import tifffile as tiff
In [ ]:
# Read the image file
image = tiff.imread(imagefile) # Shape: (height, width, channels) or (bands, height, width)
print(f"Image shape: {image.shape}")
# Read the mask file
mask = tiff.imread(masksfile) # Shape: (height, width)
print(f"Mask shape: {mask.shape}")
Image shape: (2048, 2048, 8)
Mask shape: (2048, 2048)
In [ ]:
from patchify import patchify, unpatchify
patches_img = patchify(image, (patchsize, patchsize, bandNum), step=step)  # step == patchsize means no overlap
print(patches_img.shape)
patches_mask = patchify(mask, (patchsize, patchsize), step=step)  # step == patchsize means no overlap
print(patches_mask.shape)
(16, 16, 1, 128, 128, 8)
(16, 16, 128, 128)
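The extra singleton axis in `patches_img` appears because a 3-D patch shape (height, width, bands) is slid over a 3-D array, so patchify adds a size-1 step axis for the band dimension. A minimal sketch of removing it, as an alternative to indexing `[i, j, 0]` the way the loop below does:

In [ ]:
# Squeeze out the size-1 band-step axis: (16, 16, 1, 128, 128, 8) -> (16, 16, 128, 128, 8)
patches_img_squeezed = np.squeeze(patches_img, axis=2)
print(patches_img_squeezed.shape)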
In [ ]:
patchImg_stack = []
patchMask_stack = []
# Collect every patch (the commented-out check would keep only patches
# whose mask contains at least one labeled pixel).
for i in range(patches_img.shape[0]):
    for j in range(patches_img.shape[1]):
        patch = patches_mask[i, j, :, :]
        #sumV = np.sum(np.sum(patch))
        #if sumV > 0:
        patchImg_stack.append(patches_img[i, j, 0, :, :, :])
        patchMask_stack.append(patch)

# Convert to numpy arrays
train_images = np.array(patchImg_stack)
train_masks = np.array(patchMask_stack)
nI = train_images.shape[0]
nI
Out[ ]:
256
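If only patches that actually contain labeled pixels are wanted (the filter left commented out above), the loop can be written as below. This is a sketch, not part of the original run; it would reduce the 256 patches and change the shapes printed later.

In [ ]:
# Variant: keep only patches whose mask has at least one positive pixel.
labeledImg_stack, labeledMask_stack = [], []
for i in range(patches_img.shape[0]):
    for j in range(patches_img.shape[1]):
        patch = patches_mask[i, j, :, :]
        if np.sum(patch) > 0:  # skip all-background patches
            labeledImg_stack.append(patches_img[i, j, 0, :, :, :])
            labeledMask_stack.append(patch)
print(len(labeledMask_stack), 'of', nI, 'patches contain labels')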
In [ ]:
SIZE_X = patchsize
SIZE_Y = patchsize
n_classes=2 #Number of classes for segmentation
###############################################
#Encode labels... multi-dim array, so flatten, encode, and reshape back
labelencoder = LabelEncoder()
n, h, w = train_masks.shape
print(n, h, w)
train_masks_reshaped = train_masks.reshape(-1)  # 1-D, avoids sklearn's column-vector warning
print(train_masks_reshaped.shape)
train_masks_reshaped_encoded = labelencoder.fit_transform(train_masks_reshaped)
train_masks_encoded_original_shape = train_masks_reshaped_encoded.reshape(n, h, w)
np.unique(train_masks_encoded_original_shape)
#print(train_masks_encoded_original_shape.shape)
#################################################
train_images_expanded = np.expand_dims(train_images, axis=3)  # not used later (leftover from a single-band version)
print(train_images_expanded.shape)
#train_images = normalize(train_images, axis=1)
train_masks_input = np.expand_dims(train_masks_encoded_original_shape, axis=3)
print(train_masks_input.shape)
256 128 128
(4194304,)
(256, 128, 128, 1, 8)
(256, 128, 128, 1)
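Before splitting, it can help to check how imbalanced the two classes are; the long plateau at roughly 0.97 accuracy early in the training log below suggests the background class dominates. A quick sketch (exact counts depend on the mask):

In [ ]:
# Count pixels per class in the encoded masks.
values, counts = np.unique(train_masks_input, return_counts=True)
for v, c in zip(values, counts):
    print(f"class {v}: {c} pixels ({100 * c / counts.sum():.2f}%)")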
In [ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_images, train_masks_input, test_size=0.20, random_state=0)
#Further split the training data into a smaller subset for quick testing of models
#X_train, X_do_not_use, y_train, y_do_not_use = train_test_split(X1, y1, test_size = 0.2, random_state = 0)
print("Class values in the dataset are ... ", np.unique(y_train)) # 0 is the background/few unlabeled
print(y_train.shape)
print(X_train.shape)
print(y_test.shape)
print(X_test.shape)
#=========================================================
train_masks_cat = to_categorical(y_train, num_classes=n_classes)
y_train_cat = train_masks_cat.reshape((y_train.shape[0], y_train.shape[1], y_train.shape[2], n_classes))
test_masks_cat = to_categorical(y_test, num_classes=n_classes)
y_test_cat = test_masks_cat.reshape((y_test.shape[0], y_test.shape[1], y_test.shape[2], n_classes))
IMG_HEIGHT = X_train.shape[1]
IMG_WIDTH = X_train.shape[2]
IMG_CHANNELS = X_train.shape[3]
print(X_train.shape)
Class values in the dataset are ...  [0 1]
(204, 128, 128, 1)
(204, 128, 128, 8)
(52, 128, 128, 1)
(52, 128, 128, 8)
(204, 128, 128, 8)
In [ ]:
#inputs = (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
model = multi_unet_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
(None, 128, 128, 8)
c1 size (None, 128, 128, 16)
p1 size (None, 64, 64, 16)
c2 size (None, 64, 64, 32)
p2 size (None, 32, 32, 32)
c3 size (None, 32, 32, 64)
p3 size (None, 16, 16, 64)
c4 size (None, 16, 16, 128)
p4 size (None, 8, 8, 128)
c5 size (None, 8, 8, 256)
c6 size (None, 16, 16, 128)
u6 size (None, 16, 16, 256)
c7 size (None, 32, 32, 64)
u7 size (None, 32, 32, 128)
c8 size (None, 64, 64, 32)
u8 size (None, 64, 64, 64)
c9 size (None, 128, 128, 16)
u9 size (None, 128, 128, 32)
Model: "model_3"
__________________________________________________________________________________________________
 Layer (type)                            Output Shape            Param #   Connected to
==================================================================================================
 input_4 (InputLayer)                    [(None, 128, 128, 8)]   0         []
 lambda_3 (Lambda)                       (None, 128, 128, 8)     0         ['input_4[0][0]']
 conv2d_57 (Conv2D)                      (None, 128, 128, 16)    1168      ['lambda_3[0][0]']
 dropout_27 (Dropout)                    (None, 128, 128, 16)    0         ['conv2d_57[0][0]']
 conv2d_58 (Conv2D)                      (None, 128, 128, 16)    2320      ['dropout_27[0][0]']
 max_pooling2d_12 (MaxPooling2D)         (None, 64, 64, 16)      0         ['conv2d_58[0][0]']
 conv2d_59 (Conv2D)                      (None, 64, 64, 32)      4640      ['max_pooling2d_12[0][0]']
 dropout_28 (Dropout)                    (None, 64, 64, 32)      0         ['conv2d_59[0][0]']
 conv2d_60 (Conv2D)                      (None, 64, 64, 32)      9248      ['dropout_28[0][0]']
 max_pooling2d_13 (MaxPooling2D)         (None, 32, 32, 32)      0         ['conv2d_60[0][0]']
 conv2d_61 (Conv2D)                      (None, 32, 32, 64)      18496     ['max_pooling2d_13[0][0]']
 dropout_29 (Dropout)                    (None, 32, 32, 64)      0         ['conv2d_61[0][0]']
 conv2d_62 (Conv2D)                      (None, 32, 32, 64)      36928     ['dropout_29[0][0]']
 max_pooling2d_14 (MaxPooling2D)         (None, 16, 16, 64)      0         ['conv2d_62[0][0]']
 conv2d_63 (Conv2D)                      (None, 16, 16, 128)     73856     ['max_pooling2d_14[0][0]']
 dropout_30 (Dropout)                    (None, 16, 16, 128)     0         ['conv2d_63[0][0]']
 conv2d_64 (Conv2D)                      (None, 16, 16, 128)     147584    ['dropout_30[0][0]']
 max_pooling2d_15 (MaxPooling2D)         (None, 8, 8, 128)       0         ['conv2d_64[0][0]']
 conv2d_65 (Conv2D)                      (None, 8, 8, 256)       295168    ['max_pooling2d_15[0][0]']
 dropout_31 (Dropout)                    (None, 8, 8, 256)       0         ['conv2d_65[0][0]']
 conv2d_66 (Conv2D)                      (None, 8, 8, 256)       590080    ['dropout_31[0][0]']
 conv2d_transpose_12 (Conv2DTranspose)   (None, 16, 16, 128)     131200    ['conv2d_66[0][0]']
 concatenate_12 (Concatenate)            (None, 16, 16, 256)     0         ['conv2d_transpose_12[0][0]', 'conv2d_64[0][0]']
 conv2d_67 (Conv2D)                      (None, 16, 16, 128)     295040    ['concatenate_12[0][0]']
 dropout_32 (Dropout)                    (None, 16, 16, 128)     0         ['conv2d_67[0][0]']
 conv2d_68 (Conv2D)                      (None, 16, 16, 128)     147584    ['dropout_32[0][0]']
 conv2d_transpose_13 (Conv2DTranspose)   (None, 32, 32, 64)      32832     ['conv2d_68[0][0]']
 concatenate_13 (Concatenate)            (None, 32, 32, 128)     0         ['conv2d_transpose_13[0][0]', 'conv2d_62[0][0]']
 conv2d_69 (Conv2D)                      (None, 32, 32, 64)      73792     ['concatenate_13[0][0]']
 dropout_33 (Dropout)                    (None, 32, 32, 64)      0         ['conv2d_69[0][0]']
 conv2d_70 (Conv2D)                      (None, 32, 32, 64)      36928     ['dropout_33[0][0]']
 conv2d_transpose_14 (Conv2DTranspose)   (None, 64, 64, 32)      8224      ['conv2d_70[0][0]']
 concatenate_14 (Concatenate)            (None, 64, 64, 64)      0         ['conv2d_transpose_14[0][0]', 'conv2d_60[0][0]']
 conv2d_71 (Conv2D)                      (None, 64, 64, 32)      18464     ['concatenate_14[0][0]']
 dropout_34 (Dropout)                    (None, 64, 64, 32)      0         ['conv2d_71[0][0]']
 conv2d_72 (Conv2D)                      (None, 64, 64, 32)      9248      ['dropout_34[0][0]']
 conv2d_transpose_15 (Conv2DTranspose)   (None, 128, 128, 16)    2064      ['conv2d_72[0][0]']
 concatenate_15 (Concatenate)            (None, 128, 128, 32)    0         ['conv2d_transpose_15[0][0]', 'conv2d_58[0][0]']
 conv2d_73 (Conv2D)                      (None, 128, 128, 16)    4624      ['concatenate_15[0][0]']
 dropout_35 (Dropout)                    (None, 128, 128, 16)    0         ['conv2d_73[0][0]']
 conv2d_74 (Conv2D)                      (None, 128, 128, 16)    2320      ['dropout_35[0][0]']
 conv2d_75 (Conv2D)                      (None, 128, 128, 2)     34        ['conv2d_74[0][0]']
==================================================================================================
Total params: 1941842 (7.41 MB)
Trainable params: 1941842 (7.41 MB)
Non-trainable params: 0 (0.00 Byte)
__________________________________________________________________________________________________
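The fit call below leaves `class_weight` commented out. For imbalanced masks like this one, a hedged sketch of computing balanced per-class weights follows; note that Keras does not accept `class_weight` for 3+ dimensional targets, so in practice these weights would have to be applied through `sample_weight` or a custom weighted loss.

In [ ]:
# Sketch: balanced per-class weights from the flattened masks.
from sklearn.utils.class_weight import compute_class_weight
flat_labels = train_masks_input.ravel()
weights = compute_class_weight(class_weight='balanced', classes=np.unique(flat_labels), y=flat_labels)
class_weights = dict(enumerate(weights))
print(class_weights)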
In [ ]:
import datetime

start_time = datetime.datetime.now()
history = model.fit(X_train, y_train_cat,
                    batch_size=8,
                    verbose=1,
                    epochs=100,
                    validation_data=(X_test, y_test_cat),
                    #class_weight=class_weights,
                    shuffle=True)
process_time = datetime.datetime.now() - start_time
print(f"Process time: {process_time.total_seconds():.3f} seconds")
Epoch 1/100
26/26 [==============================] - 9s 216ms/step - loss: 0.2347 - accuracy: 0.9527 - val_loss: 0.1130 - val_accuracy: 0.9777
Epoch 2/100
26/26 [==============================] - 5s 196ms/step - loss: 0.1528 - accuracy: 0.9675 - val_loss: 0.1190 - val_accuracy: 0.9777
...
Epoch 99/100
26/26 [==============================] - 5s 199ms/step - loss: 0.0137 - accuracy: 0.9945 - val_loss: 0.0141 - val_accuracy: 0.9947
Epoch 100/100
26/26 [==============================] - 5s 201ms/step - loss: 0.0148 - accuracy: 0.9940 - val_loss: 0.0148 - val_accuracy: 0.9948
Process time: 522.346 seconds
In [ ]:
_, acc = model.evaluate(X_test, y_test_cat)
print("Accuracy is = ", (acc * 100.0), "%")

loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
df_history = pd.DataFrame({'loss': loss, 'val_loss': val_loss, 'acc': acc, 'val_acc': val_acc})
epochs = range(1, len(loss) + 1)

plt.rcParams.update(plt.rcParamsDefault)
plt.plot(epochs, loss, 'o-', label='Training', markersize=5, color='#4f6b8d')  # 'o-' adds circle markers
plt.plot(epochs, val_loss, 'o-', label='Validation', markersize=5, color='#cf3832')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(fontsize='large')
plt.grid(linestyle='--')
plt.tight_layout(pad=1.0)
#plt.savefig('SolarPanel_UNET_loss_'+formatted_date+'v1.tif', dpi=300, format='tiff')
plt.show()

#plt.rcParams.update({'font.family': 'Microsoft JhengHei', 'font.size': 16})
plt.plot(epochs, acc, 'o-', label='Training', markersize=5, color='#4f6b8d')
plt.plot(epochs, val_acc, 'o-', label='Validation', markersize=5, color='#cf3832')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(fontsize='large')
plt.grid(linestyle='--')
plt.tight_layout(pad=1.0)
#plt.savefig('SolarPanel_UNET_Accuracy_'+formatted_date+'v1.tif', dpi=300, format='tiff')
plt.show()
2/2 [==============================] - 0s 67ms/step - loss: 0.0148 - accuracy: 0.9948
Accuracy is =  99.481201171875 %
In [ ]:
def convertToUint8(img):
    # Scale raw band values down into the 0-255 display range
    return (img.astype(np.float64) / 20).astype(np.uint8)

# ============================================
def visualize3Images(image1, image2, image3):
    plt.figure()
    plt.subplot(1, 3, 1)
    plt.title('RGB image', fontsize=10)
    plt.imshow(image1)
    plt.axis('off')
    plt.subplot(1, 3, 2)
    plt.title('Prediction', fontsize=10)
    plt.imshow(image2, cmap='magma')
    plt.axis('off')
    plt.subplot(1, 3, 3)
    plt.title('Ground Truth', fontsize=10)
    plt.imshow(image3, cmap='magma')
    plt.axis('off')

#=========================================================
# Visualize the UNET classification results
y_pred_train_images = model.predict(train_images)
prediction_train_images = np.argmax(y_pred_train_images, axis=3)
#for i in range(train_masks.shape[0]):
for i in range(64):
    image1 = train_images[i, :, :, (5, 3, 1)]  # bands 5/3/1 as pseudo-RGB (fancy indexing moves the band axis first)
    image1 = np.transpose(image1, (1, 2, 0))   # back to (height, width, band)
    image1 = convertToUint8(image1)
    image2 = prediction_train_images[i, :, :]
    image3 = train_masks[i, :, :]
    visualize3Images(image1, image2, image3)
    plt.show()
8/8 [==============================] - 2s 97ms/step
In [ ]:
unpatchifiedSize = patches_img.shape[0:2]                                   # (16, 16) patch grid
unpatchifiedSize = tuple(value * patchsize for value in unpatchifiedSize)   # (2048, 2048)
reconstructed_mask = unpatchify(patches_mask, unpatchifiedSize)
print(reconstructed_mask.shape)

# Note: prediction_train_images covers all 256 patches, training and test alike.
reshaped_prediction = prediction_train_images.reshape(patches_img.shape[0], patches_img.shape[1], patchsize, patchsize)
reconstructed_pred = unpatchify(reshaped_prediction, unpatchifiedSize)

unpatchifiedImgSize = patches_img.shape[0:2]
unpatchifiedImgSize = tuple(value * patchsize for value in unpatchifiedImgSize) + (bandNum,)
reconstructed_image = unpatchify(patches_img, unpatchifiedImgSize)

fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].imshow((reconstructed_image[:, :, (5, 3, 1)].astype(np.float64) / 16).astype(np.uint8))
ax[1].imshow(reconstructed_mask, cmap='magma')
ax[2].imshow(reconstructed_pred, cmap='magma')
plt.show()
(2048, 2048)
In [ ]:
# Calculate IoU (intersection over union) between two binary masks
def calculate_iou(image1, image2):
    # Intersection (logical AND) and union (logical OR)
    intersection = np.logical_and(image1, image2)
    union = np.logical_or(image1, image2)
    # Count the pixels in each
    intersection_count = np.sum(intersection)
    union_count = np.sum(union)
    # Avoid division by zero
    if union_count == 0:
        iou = 0
    else:
        iou = intersection_count / union_count
    return [intersection_count, union_count, iou]
In [ ]:
[intersection_count, union_count, iou] = calculate_iou(reconstructed_mask, reconstructed_pred)
print(iou)
0.8078708773159301
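The IoU above is computed over the full mosaic, which mixes training and test patches. A sketch of the same metric restricted to the 52 held-out patches (reusing calculate_iou; variable names here are illustrative):

In [ ]:
# Hedged sketch: IoU on the held-out test patches only.
y_pred_test = model.predict(X_test)
pred_test_labels = np.argmax(y_pred_test, axis=3)          # (52, 128, 128)
_, _, test_iou = calculate_iou(y_test[:, :, :, 0], pred_test_labels)
print('Test-set IoU:', test_iou)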