日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問(wèn) 生活随笔!

生活随笔

當(dāng)前位置: 首頁(yè) > 编程资源 > 编程问答 >内容正文

编程问答

Unet项目解析(6): 图像分块、整合 / 数据对齐、网络输出转成图像

發(fā)布時(shí)間:2025/3/15 编程问答 17 豆豆
生活随笔 收集整理的這篇文章主要介紹了 Unet项目解析(6): 图像分块、整合 / 数据对齐、网络输出转成图像 小編覺(jué)得挺不錯(cuò)的,現(xiàn)在分享給大家,幫大家做個(gè)參考.

項(xiàng)目GitHub主頁(yè):https://github.com/orobix/retina-unet

參考論文:Retina blood vessel segmentation with a convolution neural network (U-net)


1. 訓(xùn)練數(shù)據(jù)

1.1 訓(xùn)練圖像、訓(xùn)練金標(biāo)準(zhǔn)隨機(jī)分塊

主代碼:

# 訓(xùn)練集太少,采用分塊的方法進(jìn)行訓(xùn)練 def get_data_training(DRIVE_train_imgs_original, #訓(xùn)練圖像路徑DRIVE_train_groudTruth, #金標(biāo)準(zhǔn)圖像路徑patch_height,patch_width,N_subimgs,inside_FOV):train_imgs_original = load_hdf5(DRIVE_train_imgs_original)train_masks = load_hdf5(DRIVE_train_groudTruth) #visualize(group_images(train_imgs_original[0:20,:,:,:],5),'imgs_train').show() train_imgs = my_PreProc(train_imgs_original) # 圖像預(yù)處理 歸一化等train_masks = train_masks/255.train_imgs = train_imgs[:,:,9:574,:] # 圖像裁剪 size=565*565train_masks = train_masks[:,:,9:574,:] # 圖像裁剪 size=565*565data_consistency_check(train_imgs,train_masks) # 訓(xùn)練圖像和金標(biāo)準(zhǔn)圖像一致性檢查assert(np.min(train_masks)==0 and np.max(train_masks)==1) #金標(biāo)準(zhǔn)圖像 2類 0-1print ("\n train images/masks shape:")print (train_imgs.shape)print ("train images range (min-max): " +str(np.min(train_imgs)) +' - '+str(np.max(train_imgs)))print ("train masks are within 0-1\n")# 從整張圖像中-隨機(jī)提取-訓(xùn)練子塊patches_imgs_train, patches_masks_train =extract_random(train_imgs,train_masks,patch_height,patch_width,N_subimgs,inside_FOV)data_consistency_check(patches_imgs_train, patches_masks_train) # 訓(xùn)練圖像子塊和金標(biāo)準(zhǔn)圖像子塊一致性檢查print ("\n train PATCHES images/masks shape:")print (patches_imgs_train.shape)print ("train PATCHES images range (min-max): " +str(np.min(patches_imgs_train)) +' - '+str(np.max(patches_imgs_train)))return patches_imgs_train, patches_masks_train

隨機(jī)提取子塊:

# 訓(xùn)練集圖像 隨機(jī) 提取子塊 def extract_random(full_imgs,full_masks, patch_h,patch_w, N_patches, inside=True):if (N_patches%full_imgs.shape[0] != 0): # 檢驗(yàn)每張圖像應(yīng)該提取多少塊print "N_patches: plase enter a multiple of 20"exit()assert (len(full_imgs.shape)==4 and len(full_masks.shape)==4) # 張量尺寸檢驗(yàn)assert (full_imgs.shape[1]==1 or full_imgs.shape[1]==3) # 通道檢驗(yàn)assert (full_masks.shape[1]==1) # 通道檢驗(yàn)assert (full_imgs.shape[2] == full_masks.shape[2] and full_imgs.shape[3] == full_masks.shape[3]) # 尺寸檢驗(yàn)patches = np.empty((N_patches,full_imgs.shape[1],patch_h,patch_w)) # 訓(xùn)練圖像總子塊patches_masks = np.empty((N_patches,full_masks.shape[1],patch_h,patch_w)) # 訓(xùn)練金標(biāo)準(zhǔn)總子塊img_h = full_imgs.shape[2] img_w = full_imgs.shape[3] patch_per_img = int(N_patches/full_imgs.shape[0]) # 每張圖像中提取的子塊數(shù)量print ("patches per full image: " +str(patch_per_img))iter_tot = 0 # 圖像子塊總量計(jì)數(shù)器for i in range(full_imgs.shape[0]): # 遍歷每一張圖像k=0 # 每張圖像子塊計(jì)數(shù)器while k <patch_per_img:x_center = random.randint(0+int(patch_w/2),img_w-int(patch_w/2)) # 塊中心的范圍y_center = random.randint(0+int(patch_h/2),img_h-int(patch_h/2))if inside==True:if is_patch_inside_FOV(x_center,y_center,img_w,img_h,patch_h)==False:continuepatch = full_imgs[i,:,y_center-int(patch_h/2):y_center+int(patch_h/2),x_center-int(patch_w/2):x_center+int(patch_w/2)]patch_mask = full_masks[i,:,y_center-int(patch_h/2):y_center+int(patch_h/2),x_center-int(patch_w/2):x_center+int(patch_w/2)]patches[iter_tot]=patch # size=[Npatches, 3, patch_h, patch_w]patches_masks[iter_tot]=patch_mask # size=[Npatches, 1, patch_h, patch_w]iter_tot +=1 # 子塊總量計(jì)數(shù)器k+=1 # 每張圖像子塊總量計(jì)數(shù)器return patches, patches_masks

數(shù)據(jù)一致性檢查函數(shù):

# 訓(xùn)練集圖像 和 金標(biāo)準(zhǔn)圖像一致性檢驗(yàn) def data_consistency_check(imgs,masks):assert(len(imgs.shape)==len(masks.shape))assert(imgs.shape[0]==masks.shape[0])assert(imgs.shape[2]==masks.shape[2])assert(imgs.shape[3]==masks.shape[3])assert(masks.shape[1]==1)assert(imgs.shape[1]==1 or imgs.shape[1]==3)

1.2 訓(xùn)練金標(biāo)準(zhǔn)改寫(xiě)成Une輸出形式

# 將金標(biāo)準(zhǔn)圖像改寫(xiě)成模型輸出形式 def masks_Unet(masks): # size=[Npatches, 1, patch_height, patch_width]assert (len(masks.shape)==4)assert (masks.shape[1]==1 )im_h = masks.shape[2]im_w = masks.shape[3]masks = np.reshape(masks,(masks.shape[0],im_h*im_w)) # 單像素建模new_masks = np.empty((masks.shape[0],im_h*im_w,2)) # 二分類輸出for i in range(masks.shape[0]):for j in range(im_h*im_w):if masks[i,j] == 0:new_masks[i,j,0]=1 # 金標(biāo)準(zhǔn)圖像的反轉(zhuǎn)new_masks[i,j,1]=0 # 金標(biāo)準(zhǔn)圖像else:new_masks[i,j,0]=0new_masks[i,j,1]=1return new_masks

2. 網(wǎng)絡(luò)輸出轉(zhuǎn)換成圖像子塊

# 網(wǎng)絡(luò)輸出 size=[Npatches, patch_height*patch_width, 2] def pred_to_imgs(pred, patch_height, patch_width, mode="original"):assert (len(pred.shape)==3) assert (pred.shape[2]==2 ) # 確認(rèn)是否為二分類pred_images = np.empty((pred.shape[0],pred.shape[1])) #(Npatches,height*width)if mode=="original": # 網(wǎng)絡(luò)概率輸出for i in range(pred.shape[0]):for pix in range(pred.shape[1]):pred_images[i,pix]=pred[i,pix,1] #pred[:, :, 0] 是反分割圖像輸出 pred[:, :, 1]是分割輸出elif mode=="threshold": # 網(wǎng)絡(luò)概率-閾值輸出for i in range(pred.shape[0]):for pix in range(pred.shape[1]):if pred[i,pix,1]>=0.5:pred_images[i,pix]=1else:pred_images[i,pix]=0else:print ("mode " +str(mode) +" not recognized, it can be 'original' or 'threshold'")exit()# 改寫(xiě)成(Npatches,1, height, width)pred_images = np.reshape(pred_images,(pred_images.shape[0],1, patch_height, patch_width)) return pred_images

3. 測(cè)試圖像按順序分塊、預(yù)測(cè)子塊重新整合成圖像

3.1 測(cè)試圖像分塊

def get_data_testing_overlap(DRIVE_test_imgs_original,
                             DRIVE_test_groudTruth,
                             Imgs_to_test,  # e.g. 20 for DRIVE
                             patch_height, patch_width,
                             stride_height, stride_width):
    """Load the test set and cut it into ordered, overlapping patches.

    Images are preprocessed, truncated to the first Imgs_to_test, zero-padded
    so the patch grid tiles them exactly, and then sampled in order with the
    given strides so the full images can be recomposed afterwards.

    Returns:
        (patches_imgs_test, padded_height, padded_width, test_masks)
    """
    test_imgs = my_PreProc(load_hdf5(DRIVE_test_imgs_original))
    test_masks = load_hdf5(DRIVE_test_groudTruth) / 255.

    # Keep only the first Imgs_to_test images / masks.
    test_imgs = test_imgs[0:Imgs_to_test, :, :, :]
    test_masks = test_masks[0:Imgs_to_test, :, :, :]

    # Pad the images so they can be divided into patches exactly.
    test_imgs = paint_border_overlap(test_imgs, patch_height,
                                     patch_width, stride_height, stride_width)
    assert (np.max(test_masks) == 1 and np.min(test_masks) == 0)

    print("\n test images shape:")
    print(test_imgs.shape)
    print("\n test mask shape:")
    print(test_masks.shape)
    print("test images range (min-max): " + str(np.min(test_imgs)) + ' - ' + str(np.max(test_imgs)))

    # Ordered extraction (overlap strategy) so recomposition is possible later.
    patches_imgs_test = extract_ordered_overlap(test_imgs, patch_height, patch_width,
                                                stride_height, stride_width)
    print("\n test PATCHES images shape:")
    print(patches_imgs_test.shape)
    print("test PATCHES images range (min-max): " + str(np.min(patches_imgs_test)) + ' - ' + str(np.max(patches_imgs_test)))

    # Padded height/width are returned so the caller can recompose the images.
    return patches_imgs_test, test_imgs.shape[2], test_imgs.shape[3], test_masks

原始圖像進(jìn)行拓展填充:

def paint_border_overlap(full_imgs, patch_h, patch_w, stride_h, stride_w):
    """Zero-pad images (bottom / right) so the patch grid tiles them exactly.

    For each spatial dimension, if (size - patch) is not a multiple of the
    stride, the image is extended with zeros just enough to make it one.
    When no padding is needed the input array is returned unchanged.
    """
    assert len(full_imgs.shape) == 4          # 4D array expected
    assert full_imgs.shape[1] in (1, 3)       # grayscale or RGB
    n_imgs, channels, img_h, img_w = full_imgs.shape
    extra_h = (img_h - patch_h) % stride_h    # leftover along height
    extra_w = (img_w - patch_w) % stride_w    # leftover along width
    if extra_h != 0:
        # Grow the height to the next multiple; original pixels in the top-left.
        padded = np.zeros((n_imgs, channels, img_h + (stride_h - extra_h), img_w))
        padded[:, :, 0:img_h, 0:img_w] = full_imgs
        full_imgs = padded
    if extra_w != 0:
        # Grow the width to the next multiple (height may already be padded).
        padded = np.zeros((n_imgs, channels, full_imgs.shape[2], img_w + (stride_w - extra_w)))
        padded[:, :, :, 0:img_w] = full_imgs
        full_imgs = padded
    return full_imgs

按順序提取圖像子塊:

# 按照順序?qū)ν卣购蟮膱D像進(jìn)行子塊采樣 def extract_ordered_overlap(full_imgs, patch_h, patch_w,stride_h,stride_w):assert (len(full_imgs.shape)==4) assert (full_imgs.shape[1]==1 or full_imgs.shape[1]==3) img_h = full_imgs.shape[2] img_w = full_imgs.shape[3] assert ((img_h-patch_h)%stride_h==0 and (img_w-patch_w)%stride_w==0)N_patches_img = ((img_h-patch_h)//stride_h+1)*((img_w-patch_w)//stride_w+1) # 每張圖像采集到的子圖像N_patches_tot = N_patches_img*full_imgs.shape[0] # 測(cè)試集總共的子圖像數(shù)量patches = np.empty((N_patches_tot,full_imgs.shape[1],patch_h,patch_w))iter_tot = 0 for i in range(full_imgs.shape[0]): for h in range((img_h-patch_h)//stride_h+1):for w in range((img_w-patch_w)//stride_w+1):patch = full_imgs[i,:,h*stride_h:(h*stride_h)+patch_h,w*stride_w:(w*stride_w)+patch_w]patches[iter_tot]=patchiter_tot +=1 #totalassert (iter_tot==N_patches_tot)return patches

3.2 對(duì)于圖像子塊進(jìn)行復(fù)原

# Recompose full images from overlapping patch predictions.
# preds: [Npatches, channels, patch_h, patch_w]; img_h/img_w are the PADDED sizes.
def recompone_overlap(preds, img_h, img_w, stride_h, stride_w):
    """Average the overlapping patch predictions back into full images.

    Each patch is added into an accumulator at its grid position and a
    parallel counter tracks how many patches cover each pixel; the final
    image is the per-pixel mean.

    Fixed defects: the patch-count assert was missing its closing
    parenthesis (syntax error) and the shape print used Python 2 syntax.

    Returns:
        [N_full_imgs, channels, img_h, img_w] with values in [0, 1].
    """
    assert (len(preds.shape) == 4)                      # 4D tensor expected
    assert (preds.shape[1] == 1 or preds.shape[1] == 3) # 1 or 3 channels
    patch_h = preds.shape[2]
    patch_w = preds.shape[3]
    N_patches_h = (img_h - patch_h) // stride_h + 1  # patches along the height
    N_patches_w = (img_w - patch_w) // stride_w + 1  # patches along the width
    N_patches_img = N_patches_h * N_patches_w        # patches per full image
    assert (preds.shape[0] % N_patches_img == 0)     # fixed: closing paren was missing
    N_full_imgs = preds.shape[0] // N_patches_img    # number of full images
    full_prob = np.zeros((N_full_imgs, preds.shape[1], img_h, img_w))  # summed probabilities
    full_sum = np.zeros((N_full_imgs, preds.shape[1], img_h, img_w))   # per-pixel coverage count
    k = 0  # iterate over all patches
    for i in range(N_full_imgs):
        for h in range((img_h - patch_h) // stride_h + 1):
            for w in range((img_w - patch_w) // stride_w + 1):
                full_prob[i, :, h*stride_h:(h*stride_h)+patch_h, w*stride_w:(w*stride_w)+patch_w] += preds[k]
                full_sum[i, :, h*stride_h:(h*stride_h)+patch_h, w*stride_w:(w*stride_w)+patch_w] += 1
                k += 1
    assert (k == preds.shape[0])
    assert (np.min(full_sum) >= 1.0)  # every pixel must be covered at least once
    final_avg = full_prob / full_sum  # summed probability / coverage: mean over overlaps
    print(final_avg.shape)            # fixed: was a Python 2 print statement
    assert (np.max(final_avg) <= 1.0)
    assert (np.min(final_avg) >= 0.0)
    return final_avg

總結(jié)

以上是生活随笔為你收集整理的Unet项目解析(6): 图像分块、整合 / 数据对齐、网络输出转成图像的全部?jī)?nèi)容,希望文章能夠幫你解決所遇到的問(wèn)題。

如果覺(jué)得生活随笔網(wǎng)站內(nèi)容還不錯(cuò),歡迎將生活随笔推薦給好友。

主站蜘蛛池模板: 久久av网址 | 国产精品久久久久久久久久东京 | 国产哺乳奶水91在线播放 | 中文字幕在线视频免费播放 | 182tv福利视频| 波多野结衣一区二 | 国产精品厕所 | 男生捅女生肌肌 | 国产欧美视频在线观看 | 91亚色| 国产精品毛片一区二区三区 | 在线观看免费视频国产 | 外国一级片 | 91精品视频在线播放 | 亚洲精品久久久久久久蜜桃 | 欧美美女一区二区 | 欧美视频精品在线 | 美国黄色av| 中文字幕超清在线观看 | 狠狠五月天 | 青娱乐最新视频 | 国产一区二区三区四区五区在线 | 538国产精品视频一区二区 | 国产视频69| 天天碰天天 | 美女撒尿无遮挡网站 | 爱蜜臀av | 亚洲高清视频网站 | 一级一片免费播放 | 久久综合av| 日韩黄色三级 | 一区二区三区高清不卡 | 台湾佬美性中文娱乐 | 天天操网 | 激情综合区 | 福利在线视频观看 | 1024av在线| 亚洲中文字幕无码爆乳av | 久久精品视频免费看 | 樱花草av | 高h调教冰块play男男双性文 | 男人操女人的网站 | 成人黄色免费视频 | 欧美高清精品 | 久久久九九九九 | 日韩精品极品视频在线观看免费 | 啪啪在线视频 | 狠狠操狠狠 | 成人动作片| 另一种灿烂生活 | 久久这里精品 | 求个黄色网址 | 奇米色婷婷 | 亚洲成人免费视频 | 熟妇人妻中文av无码 | 日本成人免费在线视频 | √资源天堂中文在线视频 | 亚洲图片在线 | 日韩在线电影一区 | 亚洲欧美一区二区三区久久 | 欧美日韩国产高清 | 久久久久久久久久久久电影 | juliaann欧美二区三区 | 丝袜视频在线 | 毛片.com | 天堂在线中文字幕 | 91在线国产观看 | 多毛的亚洲人毛茸茸 | 国产成人啪精品 | 日本特级片 | 成人免费网站黄 | 91视频爱爱 | 少妇大叫太粗太大爽一区二区 | 亚洲一区在线不卡 | av色在线观看 | 国产日韩欧美在线 | 国产真人做爰视频免费 | chinese hd av| 日本精品一区在线 | 成人午夜视频在线播放 | 黄色免费在线播放 | 一道本一区二区 | 精品免费在线视频 | 久久精品国产清自在天天线 | 精品国产系列 | 免费黄色在线网址 | 青青超碰| 国产综合网站 | 9999视频 | 福利视频黄色 | 免费看黄色三级三级 | 日本在线免费观看视频 | 97国产精品视频人人做人人爱 | 91色网站 | 中文字幕日日 | 在线日韩三级 | 亚洲美女屁股眼交3 | 色乱码一区二区三区在线男奴 | 国产123区在线观看 91国产一区二区 |