文章目录 00 写在前面 01 基于Pytorch版本的UNet代码 02 论文下载

00 写在前面
通过U-Net代码学习，可以学习基于Pytorch的网络结构模块化编程，对于后续学习其他更复杂网络模型，有很大的帮助作用。
在01中，可以根据U-Net的网络结构进行模块化编程。
通过U-Net代码学习可以学习基于Pytorch的网络结构模块化编程对于后续学习其他更复杂网络模型有很大的帮助作用。
在01中可以根据U-Net的网络结构开头图片进行模块化编程。包括卷积模块定义、上采样模块定义、输出卷积层定义、损失函数定义、网络模型定义等。
在模型调试过程中可以先通过简单测试代码进行代码调试。
01 基于Pytorch版本的UNet代码
# 库函数调用
import torch
import torch.nn as nn
from network.ops import TotalVariation
from torchvision.models import vgg19# 卷积块定义
class conv_block(nn.Module):
    """Double-convolution block of the U-Net: two 3x3 conv + ReLU layers.

    Args:
        ch_in: number of input channels.
        ch_out: number of output channels (both conv layers emit this many).
    """

    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            # nn.BatchNorm2d(ch_out),  # BatchNorm disabled in the original
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            # nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # padding=1 with kernel 3 / stride 1 keeps spatial size unchanged.
        x = self.conv(x)
        return x

# Upsampling block definition
class up_conv(nn.Module):
    """Decoder upsampling block: 2x nearest-neighbor upsample, then 3x3 conv + ReLU.

    Args:
        ch_in: number of input channels.
        ch_out: number of output channels after the conv.
    """

    def __init__(self, ch_in, ch_out):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),  # default mode='nearest'
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            # nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # Output spatial size is exactly twice the input's.
        x = self.up(x)
        return x

# Output convolution layer definition
class outconv(nn.Module):
    """Final projection layer: a single 3x3 convolution, no activation."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
            # nn.ReLU(inplace=True),  # kept disabled as in the original
        )

    def forward(self, x):
        x = self.conv(x)
        return x


class UNET_MODEL(nn.Module):
    """U-Net encoder-decoder with skip connections.

    Five encoder stages (each halving spatial size via max-pool) and four
    decoder stages; each decoder stage concatenates the matching encoder
    feature map along the channel axis before a double-conv block.

    Args:
        img_ch: number of input image channels.
        output_ch: number of output channels.
        filter_dim: channel count of the first encoder stage; deeper stages
            use 2x, 4x, 8x, 16x this value. The original hard-coded
            64/128/256/512/1024, which broke any filter_dim != 64; the
            default of 64 reproduces the original channel counts exactly.

    NOTE(review): input height/width must be divisible by 16 (four 2x
    poolings) for the skip concatenations to line up — confirm with callers.
    """

    def __init__(self, img_ch=3, output_ch=1, filter_dim=64):
        super().__init__()
        f = filter_dim
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: channel width doubles at every stage.
        self.Conv1 = conv_block(ch_in=img_ch, ch_out=f)
        self.Conv2 = conv_block(ch_in=f, ch_out=f * 2)
        self.Conv3 = conv_block(ch_in=f * 2, ch_out=f * 4)
        self.Conv4 = conv_block(ch_in=f * 4, ch_out=f * 8)
        self.Conv5 = conv_block(ch_in=f * 8, ch_out=f * 16)

        # Decoder: each Up_conv sees 2x channels because of the skip concat.
        self.Up5 = up_conv(ch_in=f * 16, ch_out=f * 8)
        self.Up_conv5 = conv_block(ch_in=f * 16, ch_out=f * 8)
        self.Up4 = up_conv(ch_in=f * 8, ch_out=f * 4)
        self.Up_conv4 = conv_block(ch_in=f * 8, ch_out=f * 4)
        self.Up3 = up_conv(ch_in=f * 4, ch_out=f * 2)
        self.Up_conv3 = conv_block(ch_in=f * 4, ch_out=f * 2)
        self.Up2 = up_conv(ch_in=f * 2, ch_out=f)
        self.Up_conv2 = conv_block(ch_in=f * 2, ch_out=f)

        self.Conv11 = outconv(f, output_ch)

    def forward(self, x):
        # Encoding path.
        x1 = self.Conv1(x)
        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)
        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)
        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)
        x5 = self.Maxpool(x4)
        x5 = self.Conv5(x5)

        # Decoding path with skip-connection concatenation.
        d5 = self.Up5(x5)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        T2 = self.Conv11(d2)
        return T2

# Loss function definitions
class loss_fun(nn.Module):
    """Weighted MSE plus total-variation regularization.

    Args:
        regular: weight of the TV regularization term.

    Target layout (inferred from the channel indexing — TODO confirm with
    the training code): y[:, 0:1] is a per-pixel weight map, y[:, 1:2] is
    the TV mask, y[:, 2:3] is the ground-truth image.
    """

    def __init__(self, regular):
        super().__init__()
        self.tv = TotalVariation()
        self.regular = regular

    def forward(self, x, y):
        ychange = y[:, 0:1, :, :]  # per-pixel weighting of the MSE term
        mask = y[:, 1:2, :, :]     # region over which TV is measured
        return torch.add(
            torch.mean(torch.pow((x[:, :, :, :] - y[:, 2:3, :, :]) * ychange, 2)),
            self.regular * torch.mean(self.tv(x[:, :, :, :] * mask)),
        )


class loss_fun_total(nn.Module):
    """Plain MSE against a 10x-scaled target channel 0.

    NOTE(review): self.tv and self.regular are stored but unused in
    forward — kept for interface parity with loss_fun.
    """

    def __init__(self, regular):
        super().__init__()
        self.tv = TotalVariation()
        self.regular = regular

    def forward(self, x, y):
        # The 10x scale on the target is presumably a label normalization
        # inherited from data preprocessing — confirm against the dataset.
        loss1 = torch.mean(torch.pow((x[:, 0:1, :, :] - y[:, 0:1, :, :] * 10), 2))
        return loss1

# Test code
if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass on an
    # all-ones batch to verify the layer wiring and output shape.
    input_channels = 4
    output_channels = 1
    x = torch.ones([32, 4, 256, 256])
    model = UNET_MODEL(input_channels, output_channels)
    print('model initialization finished!')
    f = model(x)
    print(f)

# 02 论文下载 (paper downloads)
- U-Net: deep learning for cell counting, detection, and morphometry
- U-Net: Convolutional Networks for Biomedical Image Segmentation