深度学习-Tensorflow2.2-图像处理{10}-UNET图像语义分割模型-24
生活随笔
收集整理的這篇文章主要介紹了
深度学习-Tensorflow2.2-图像处理{10}-UNET图像语义分割模型-24
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
UNET圖像語義分割模型簡介
代碼
import tensorflow as tf import matplotlib.pyplot as plt %matplotlib inline import numpy as np import glob import os # 顯存自適應分配 gpus = tf.config.experimental.list_physical_devices(device_type='GPU') for gpu in gpus:tf.config.experimental.set_memory_growth(gpu,True) gpu_ok = tf.test.is_gpu_available() print("tf version:", tf.__version__) print("use GPU", gpu_ok) # 判斷是否使用gpu進行訓練獲取訓練數據及目標值
# Collect every training image under leftImg8bit/train/<city>/*.png.
# BUG FIX: glob.glob() returns paths in filesystem-dependent order, so the
# two independent globs below were not guaranteed to line up pair-wise.
# sorted() makes img[i] and label[i] refer to the same frame (Cityscapes
# file names share a common prefix, so lexicographic order matches).
img = sorted(glob.glob("G:/BaiduNetdiskDownload/cityscapes/leftImg8bit/train/*/*.png"))
train_count = len(img)
img[:5], train_count  # notebook cell output: sample paths + count

# Matching ground-truth masks (*_gtFine_labelIds.png) under gtFine/train.
label = sorted(glob.glob("G:/BaiduNetdiskDownload/cityscapes/gtFine/train/*/*_gtFine_labelIds.png"))

# One shared random permutation shuffles images and labels together,
# keeping each image aligned with its mask.
index = np.random.permutation(len(img))
img = np.array(img)[index]
label = np.array(label)[index]
獲取測試數據
創建數據集
定義unet模型
def create_model():
    """Build a U-Net semantic-segmentation model.

    Input: (256, 256, 3) RGB image. Output: (256, 256, 34) per-pixel
    softmax over the 34 Cityscapes label ids.
    """

    def conv_block(t, filters):
        # Two 3x3 same-padding Conv+BN pairs — the repeated unit of both
        # the encoder and decoder; spatial size is unchanged.
        t = tf.keras.layers.Conv2D(filters, 3, padding="same", activation="relu")(t)
        t = tf.keras.layers.BatchNormalization()(t)
        t = tf.keras.layers.Conv2D(filters, 3, padding="same", activation="relu")(t)
        t = tf.keras.layers.BatchNormalization()(t)
        return t

    def up_block(t, skip, filters):
        # Upsample 2x with a transposed conv, concat the encoder skip
        # connection (skip first, matching the original ordering), then conv.
        t = tf.keras.layers.Conv2DTranspose(filters, 2, strides=2,
                                            padding="same", activation="relu")(t)
        t = tf.keras.layers.BatchNormalization()(t)
        t = tf.concat([skip, t], axis=-1)
        return conv_block(t, filters)

    # ----- Encoder (down-sampling path) -----
    inputs = tf.keras.layers.Input(shape=(256, 256, 3))
    x = conv_block(inputs, 64)                              # 256x256x64
    x1 = tf.keras.layers.MaxPooling2D(padding="same")(x)    # 128x128x64
    x1 = conv_block(x1, 128)                                # 128x128x128
    x2 = tf.keras.layers.MaxPooling2D(padding="same")(x1)   # 64x64x128
    x2 = conv_block(x2, 256)                                # 64x64x256
    x3 = tf.keras.layers.MaxPooling2D(padding="same")(x2)   # 32x32x256
    x3 = conv_block(x3, 512)                                # 32x32x512
    x4 = tf.keras.layers.MaxPooling2D(padding="same")(x3)   # 16x16x512
    x4 = conv_block(x4, 1024)                               # 16x16x1024 (bottleneck)

    # ----- Decoder (up-sampling path with skip connections) -----
    u = up_block(x4, x3, 512)   # 32x32x512
    u = up_block(u, x2, 256)    # 64x64x256
    u = up_block(u, x1, 128)    # 128x128x128
    u = up_block(u, x, 64)      # 256x256x64

    # 1x1 conv to 34 classes with per-pixel softmax.
    output = tf.keras.layers.Conv2D(34, 1, padding="same", activation="softmax")(u)
    return tf.keras.Model(inputs=inputs, outputs=output)


model = create_model()
tf.keras.utils.plot_model(model)  # render the architecture diagram


class MeanIou(tf.keras.metrics.MeanIoU):
    """MeanIoU for sparse integer labels.

    tf.keras.metrics.MeanIoU expects label-id predictions; our model emits
    dense per-class probabilities, so argmax them first.
    """

    def __call__(self, y_true, y_pred, sample_weight=None):
        # BUG FIX: the original referenced the undefined name `u_pred`,
        # which raised NameError the first time the metric was evaluated.
        y_pred = tf.argmax(y_pred, axis=-1)
        return super().__call__(y_true, y_pred, sample_weight=sample_weight)


model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["acc", MeanIou(num_classes=34)])

# NOTE(review): dataset_train / dataset_val / step_per_epoch / val_step are
# defined in the "create dataset" section, which is missing from this excerpt.
history = model.fit(dataset_train,
                    epochs=60,
                    steps_per_epoch=step_per_epoch,
                    validation_steps=val_step,
                    validation_data=dataset_val)
例子
總結
以上是生活随笔為你收集整理的深度学习-Tensorflow2.2-图像处理{10}-UNET图像语义分割模型-24的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 深度学习-Tensorflow2.2-图
- 下一篇: 深度学习-Tensorflow2.2-一