2000字范文,分享全网优秀范文,学习好帮手!
2000字范文 > Keras:Unet网络实现多类语义分割方式

Keras:Unet网络实现多类语义分割方式

时间:2023-10-25 23:42:54

相关推荐

Keras:Unet网络实现多类语义分割方式

Keras:Unet网络实现多类语义分割方式

更多python视频教程请到菜鸟教程/

1 介绍

U-Net最初是用来对医学图像的语义分割,后来也有人将其应用于其他领域。但大多还是用来进行二分类,即将原始图像分成两个灰度级或者色度,依次找到图像中感兴趣的目标部分。

本文主要利用U-Net网络结构实现了多类的语义分割,并展示了部分测试效果,希望对你有用!

2 源代码

(1)训练模型

from __future__ import print_functionimport osimport datetimeimport numpy as npfrom keras.models import Modelfrom keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, AveragePooling2D, Dropout, \BatchNormalizationfrom keras.optimizers import Adamfrom keras.layers.convolutional import UpSampling2D, Conv2Dfrom keras.callbacks import ModelCheckpointfrom keras import backend as Kfrom keras.layers.advanced_activations import LeakyReLU, ReLUimport cv2

PIXEL = 512 #set your image size

BATCH_SIZE = 5

lr = 0.001

EPOCH = 100

X_CHANNEL = 3 # training images channel

Y_CHANNEL = 1 # label iamges channel

X_NUM = 422 # your traning data number

pathX = ‘I:\Pascal VOC Dataset\train1\images\’ #change your file path

pathY = ‘I:\Pascal VOC Dataset\train1\SegmentationObject\’ #change your file path

# data processing
def generator(pathX, pathY, BATCH_SIZE):
    """Yield random (images, labels) batches forever, for fit_generator.

    Parameters
    ----------
    pathX : str
        Directory containing the training images (X_CHANNEL channels).
    pathY : str
        Directory containing the label images (single channel).
    BATCH_SIZE : int
        Number of samples per yielded batch.

    Yields
    ------
    (X, Y) : tuple of np.ndarray
        X has shape (BATCH_SIZE, PIXEL, PIXEL, X_CHANNEL) and
        Y has shape (BATCH_SIZE, PIXEL, PIXEL, Y_CHANNEL).

    NOTE: pairing relies on os.listdir returning image and label files in the
    same order — filenames must match between the two folders.
    """
    while 1:
        X_train_files = os.listdir(pathX)
        Y_train_files = os.listdir(pathY)
        # The original sampled from np.arange(1, X_NUM), which silently skips
        # index 0 and raises IndexError whenever X_NUM exceeds the number of
        # files present; index over the files actually found instead.
        indices = np.arange(len(X_train_files))
        X = []
        Y = []
        for _ in range(BATCH_SIZE):
            index = np.random.choice(indices)
            img = cv2.imread(pathX + X_train_files[index], 1)  # 3-channel read
            img = np.array(img).reshape(PIXEL, PIXEL, X_CHANNEL)
            X.append(img)
            # Read the label as grayscale (flag 0): the original read it in
            # color (flag 1), which yields a 3-channel array and makes the
            # reshape to Y_CHANNEL == 1 fail with a ValueError.
            img1 = cv2.imread(pathY + Y_train_files[index], 0)
            img1 = np.array(img1).reshape(PIXEL, PIXEL, Y_CHANNEL)
            Y.append(img1)
        X = np.array(X)
        Y = np.array(Y)
        yield X, Y

# ---- Create the U-Net-style network -----------------------------------------
# Encoder stages: BN -> 3x3 conv -> BN -> 1x1 conv -> dropout -> avg-pool.
# Decoder stages: upsample -> 3x3 conv -> concatenate with the matching
# encoder feature map (classic U-Net skip connections).
inputs = Input((PIXEL, PIXEL, 3))

conv1 = Conv2D(8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
pool1 = AveragePooling2D(pool_size=(2, 2))(conv1)  # 1/2 resolution

conv2 = BatchNormalization(momentum=0.99)(pool1)
conv2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
conv2 = BatchNormalization(momentum=0.99)(conv2)
conv2 = Conv2D(64, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
conv2 = Dropout(0.02)(conv2)
pool2 = AveragePooling2D(pool_size=(2, 2))(conv2)  # 1/4 resolution

conv3 = BatchNormalization(momentum=0.99)(pool2)
conv3 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
conv3 = BatchNormalization(momentum=0.99)(conv3)
conv3 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
conv3 = Dropout(0.02)(conv3)
pool3 = AveragePooling2D(pool_size=(2, 2))(conv3)  # 1/8 resolution

# NOTE(review): the original source built an extra conv4/conv5 branch here
# (256- and 512-filter convs plus a 35-filter conv and a dropout) whose
# outputs were never connected to the model output — in the Keras functional
# API such unconnected layers contribute nothing, so that dead code is removed.

# Bottleneck: two further average-poolings down to 1/32 resolution
# (matching the original's effective graph, where pool4 was rebuilt
# from pool3 just before the decoder).
pool4 = AveragePooling2D(pool_size=(2, 2))(pool3)  # 1/16 resolution
pool5 = AveragePooling2D(pool_size=(2, 2))(pool4)  # 1/32 resolution
conv6 = BatchNormalization(momentum=0.99)(pool5)
conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

# ---- Decoder ----------------------------------------------------------------
up7 = UpSampling2D(size=(2, 2))(conv7)  # 1/16
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up7)
merge7 = concatenate([pool4, conv7], axis=3)

conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
up8 = UpSampling2D(size=(2, 2))(conv8)  # 1/8
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up8)
merge8 = concatenate([pool3, conv8], axis=3)

conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
up9 = UpSampling2D(size=(2, 2))(conv9)  # 1/4
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up9)
merge9 = concatenate([pool2, conv9], axis=3)

conv10 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
up10 = UpSampling2D(size=(2, 2))(conv10)  # 1/2
conv10 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up10)

conv11 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv10)
up11 = UpSampling2D(size=(2, 2))(conv11)  # full resolution
conv11 = Conv2D(8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up11)

# Output head: 1x1 conv down to 3 channels (the original duplicated this
# exact line; one copy suffices).
conv12 = Conv2D(3, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv11)

# keras.models.Model takes `inputs`/`outputs`; the original's `input=` /
# `output=` keywords are removed in modern Keras versions.
model = Model(inputs=inputs, outputs=conv12)
print(model.summary())

# The scraped original read `pile(...)` — the leading `model.com` was lost.
model.compile(optimizer=Adam(lr=1e-3), loss='mse', metrics=['accuracy'])

# `nb_epoch` is the long-deprecated spelling; the supported kwarg is `epochs`.
history = model.fit_generator(generator(pathX, pathY, BATCH_SIZE),
                              steps_per_epoch=600, epochs=EPOCH)
end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

# save your training model
model.save(r'V1_828.h5')

# save your loss data
mse = np.array(history.history['loss'])
np.save(r'V1_828.npy', mse)

(2)测试模型

# ---- Test the trained model -------------------------------------------------
# The original had all imports mashed onto one line; restored one per line.
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras.models import load_model

model = load_model('V1_828.h5')

# Smart quotes and trailing backslashes in the scraped original were syntax
# errors; backslashes are escaped here.
test_images_path = 'I:\\Pascal VOC Dataset\\test\\test_images\\'
test_gt_path = 'I:\\Pascal VOC Dataset\\test\\SegmentationObject\\'
pre_path = 'I:\\Pascal VOC Dataset\\test\\pre\\'

# Load every test image.  The original incremented an undefined counter `i`
# inside this loop (NameError); the counter was unused here, so it is removed.
X = []
for info in os.listdir(test_images_path):
    A = cv2.imread(test_images_path + info)
    X.append(A)
X = np.array(X)
print(X.shape)

Y = model.predict(X)

# Load the matching ground-truth masks (assumes same listing order as images).
groudtruth = []
for info in os.listdir(test_gt_path):
    A = cv2.imread(test_gt_path + info)
    groudtruth.append(A)
groudtruth = np.array(groudtruth)

# Write each prediction under the same filename as its source image.
i = 0
for info in os.listdir(test_images_path):
    cv2.imwrite(pre_path + info, Y[i])
    i += 1

# Pick one random sample among the first 10 and save prediction/ground truth.
a = range(10)
n = np.random.choice(a)
cv2.imwrite('prediction.png', Y[n])
cv2.imwrite('groudtruth.png', groudtruth[n])

# Side-by-side figure: input | prediction | ground truth.  The original
# wrapped this in a single-iteration `for j in range(1)` loop with an unused
# `cnt` counter; both removed.
fig, axs = plt.subplots(1, 3)
axs[0].imshow(np.abs(X[n]))
axs[0].axis('off')
axs[1].imshow(np.abs(Y[n]))
axs[1].axis('off')
axs[2].imshow(np.abs(groudtruth[n]))
axs[2].axis('off')
fig.savefig("imagestest.png")
plt.close()

3 效果展示

说明:从左到右依次是预测图像,真实图像,标注图像。可以看出,对于部分数据的分割效果还有待改进,主要原因还是数据集相对复杂,模型难于找到其中的规律。

以上这篇Keras:Unet网络实现多类语义分割方式就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持。

茂名论坛/

化州橘红/

茂名论坛/

源码搜藏网/

茂名市高级技工学校(茂名一技)/

茂名一技/

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。