当前位置: 首页 > news >正文

建设网站需要收费吗，django做的网站有哪些

建设网站需要收费吗,django做的网站有哪些,淄博专业做网站,河南南阳油田网站建设第一步#xff1a;准备数据 人像精细分割数据#xff0c;可分割出头发丝#xff0c;为PPM-100开源数据 第二步#xff1a;搭建模型 MODNet网络结构如图所示#xff0c;主要包含3个部分#xff1a;semantic estimation#xff08;S分支#xff09;、detail prediction… 第一步准备数据 人像精细分割数据可分割出头发丝为PPM-100开源数据 第二步搭建模型 MODNet网络结构如图所示主要包含3个部分semantic estimationS分支、detail predictionD分支、semantic-detail fusionF分支。 网络结构简单描述一下 输入一幅图像I送入三个模块S、D、F S模块在低分辨率分支进行语义估计在backbone最后一层输出接上e-ASPP得到语义feature map Sp D模块在高分辨率分支进行细节预测通过融合来自低分辨率分支的信息得到细节feature map Dp F模块融合来自低分辨率分支和高分辨率分支的信息得到alpha matte ap 对S、D、F模块均使用来自GT的显式监督信息进行监督训练。 第三步代码 1损失函数为L2损失 2网络代码 import torch import torch.nn as nn import torch.nn.functional as Ffrom .backbones import SUPPORTED_BACKBONES#------------------------------------------------------------------------------ # MODNet Basic Modules #------------------------------------------------------------------------------class IBNorm(nn.Module): Combine Instance Norm and Batch Norm into One Layerdef __init__(self, in_channels):super(IBNorm, self).__init__()in_channels in_channelsself.bnorm_channels int(in_channels / 2)self.inorm_channels in_channels - self.bnorm_channelsself.bnorm nn.BatchNorm2d(self.bnorm_channels, affineTrue)self.inorm nn.InstanceNorm2d(self.inorm_channels, affineFalse)def forward(self, x):bn_x self.bnorm(x[:, :self.bnorm_channels, ...].contiguous())in_x self.inorm(x[:, self.bnorm_channels:, ...].contiguous())return torch.cat((bn_x, in_x), 1)class Conv2dIBNormRelu(nn.Module): Convolution IBNorm ReLudef __init__(self, in_channels, out_channels, kernel_size, stride1, padding0, dilation1, groups1, biasTrue, with_ibnTrue, with_reluTrue):super(Conv2dIBNormRelu, self).__init__()layers [nn.Conv2d(in_channels, out_channels, kernel_size, stridestride, paddingpadding, dilationdilation, groupsgroups, biasbias)]if with_ibn: layers.append(IBNorm(out_channels))if with_relu:layers.append(nn.ReLU(inplaceTrue))self.layers nn.Sequential(*layers)def forward(self, x):return self.layers(x) class SEBlock(nn.Module): SE Block Proposed in 
https://arxiv.org/pdf/1709.01507.pdf def __init__(self, in_channels, out_channels, reduction1):super(SEBlock, self).__init__()self.pool nn.AdaptiveAvgPool2d(1)self.fc nn.Sequential(nn.Linear(in_channels, int(in_channels // reduction), biasFalse),nn.ReLU(inplaceTrue),nn.Linear(int(in_channels // reduction), out_channels, biasFalse),nn.Sigmoid())def forward(self, x):b, c, _, _ x.size()w self.pool(x).view(b, c)w self.fc(w).view(b, c, 1, 1)return x * w.expand_as(x)#------------------------------------------------------------------------------ # MODNet Branches #------------------------------------------------------------------------------class LRBranch(nn.Module): Low Resolution Branch of MODNetdef __init__(self, backbone):super(LRBranch, self).__init__()enc_channels backbone.enc_channelsself.backbone backboneself.se_block SEBlock(enc_channels[4], enc_channels[4], reduction4)self.conv_lr16x Conv2dIBNormRelu(enc_channels[4], enc_channels[3], 5, stride1, padding2)self.conv_lr8x Conv2dIBNormRelu(enc_channels[3], enc_channels[2], 5, stride1, padding2)self.conv_lr Conv2dIBNormRelu(enc_channels[2], 1, kernel_size3, stride2, padding1, with_ibnFalse, with_reluFalse)def forward(self, img, inference):enc_features self.backbone.forward(img)enc2x, enc4x, enc32x enc_features[0], enc_features[1], enc_features[4]enc32x self.se_block(enc32x)lr16x F.interpolate(enc32x, scale_factor2, modebilinear, align_cornersFalse)lr16x self.conv_lr16x(lr16x)lr8x F.interpolate(lr16x, scale_factor2, modebilinear, align_cornersFalse)lr8x self.conv_lr8x(lr8x)pred_semantic Noneif not inference:lr self.conv_lr(lr8x)pred_semantic torch.sigmoid(lr)return pred_semantic, lr8x, [enc2x, enc4x] class HRBranch(nn.Module): High Resolution Branch of MODNetdef __init__(self, hr_channels, enc_channels):super(HRBranch, self).__init__()self.tohr_enc2x Conv2dIBNormRelu(enc_channels[0], hr_channels, 1, stride1, padding0)self.conv_enc2x Conv2dIBNormRelu(hr_channels 3, hr_channels, 3, stride2, padding1)self.tohr_enc4x 
Conv2dIBNormRelu(enc_channels[1], hr_channels, 1, stride1, padding0)self.conv_enc4x Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride1, padding1)self.conv_hr4x nn.Sequential(Conv2dIBNormRelu(3 * hr_channels 3, 2 * hr_channels, 3, stride1, padding1),Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride1, padding1),Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride1, padding1),)self.conv_hr2x nn.Sequential(Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride1, padding1),Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride1, padding1),Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride1, padding1),Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride1, padding1),)self.conv_hr nn.Sequential(Conv2dIBNormRelu(hr_channels 3, hr_channels, 3, stride1, padding1),Conv2dIBNormRelu(hr_channels, 1, kernel_size1, stride1, padding0, with_ibnFalse, with_reluFalse),)def forward(self, img, enc2x, enc4x, lr8x, inference):img2x F.interpolate(img, scale_factor1/2, modebilinear, align_cornersFalse)img4x F.interpolate(img, scale_factor1/4, modebilinear, align_cornersFalse)enc2x self.tohr_enc2x(enc2x)hr4x self.conv_enc2x(torch.cat((img2x, enc2x), dim1))enc4x self.tohr_enc4x(enc4x)hr4x self.conv_enc4x(torch.cat((hr4x, enc4x), dim1))lr4x F.interpolate(lr8x, scale_factor2, modebilinear, align_cornersFalse)hr4x self.conv_hr4x(torch.cat((hr4x, lr4x, img4x), dim1))hr2x F.interpolate(hr4x, scale_factor2, modebilinear, align_cornersFalse)hr2x self.conv_hr2x(torch.cat((hr2x, enc2x), dim1))pred_detail Noneif not inference:hr F.interpolate(hr2x, scale_factor2, modebilinear, align_cornersFalse)hr self.conv_hr(torch.cat((hr, img), dim1))pred_detail torch.sigmoid(hr)return pred_detail, hr2xclass FusionBranch(nn.Module): Fusion Branch of MODNetdef __init__(self, hr_channels, enc_channels):super(FusionBranch, self).__init__()self.conv_lr4x Conv2dIBNormRelu(enc_channels[2], hr_channels, 5, stride1, padding2)self.conv_f2x Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, 
stride1, padding1)self.conv_f nn.Sequential(Conv2dIBNormRelu(hr_channels 3, int(hr_channels / 2), 3, stride1, padding1),Conv2dIBNormRelu(int(hr_channels / 2), 1, 1, stride1, padding0, with_ibnFalse, with_reluFalse),)def forward(self, img, lr8x, hr2x):lr4x F.interpolate(lr8x, scale_factor2, modebilinear, align_cornersFalse)lr4x self.conv_lr4x(lr4x)lr2x F.interpolate(lr4x, scale_factor2, modebilinear, align_cornersFalse)f2x self.conv_f2x(torch.cat((lr2x, hr2x), dim1))f F.interpolate(f2x, scale_factor2, modebilinear, align_cornersFalse)f self.conv_f(torch.cat((f, img), dim1))pred_matte torch.sigmoid(f)return pred_matte#------------------------------------------------------------------------------ # MODNet #------------------------------------------------------------------------------class MODNet(nn.Module): Architecture of MODNetdef __init__(self, in_channels3, hr_channels32, backbone_archmobilenetv2, backbone_pretrainedTrue):super(MODNet, self).__init__()self.in_channels in_channelsself.hr_channels hr_channelsself.backbone_arch backbone_archself.backbone_pretrained backbone_pretrainedself.backbone SUPPORTED_BACKBONES[self.backbone_arch](self.in_channels)self.lr_branch LRBranch(self.backbone)self.hr_branch HRBranch(self.hr_channels, self.backbone.enc_channels)self.f_branch FusionBranch(self.hr_channels, self.backbone.enc_channels)for m in self.modules():if isinstance(m, nn.Conv2d):self._init_conv(m)elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):self._init_norm(m)if self.backbone_pretrained:self.backbone.load_pretrained_ckpt() def forward(self, img, inference):pred_semantic, lr8x, [enc2x, enc4x] self.lr_branch(img, inference)pred_detail, hr2x self.hr_branch(img, enc2x, enc4x, lr8x, inference)pred_matte self.f_branch(img, lr8x, hr2x)return pred_semantic, pred_detail, pred_mattedef freeze_norm(self):norm_types [nn.BatchNorm2d, nn.InstanceNorm2d]for m in self.modules():for n in norm_types:if isinstance(m, n):m.eval()continuedef _init_conv(self, 
conv):nn.init.kaiming_uniform_(conv.weight, a0, modefan_in, nonlinearityrelu)if conv.bias is not None:nn.init.constant_(conv.bias, 0)def _init_norm(self, norm):if norm.weight is not None:nn.init.constant_(norm.weight, 1)nn.init.constant_(norm.bias, 0)第四步搭建GUI界面 第五步整个工程的内容 有训练代码和训练好的模型以及训练过程提供数据提供GUI界面代码 代码见基于Pytorch框架的深度学习MODNet网络精细人像分割系统源码 有问题可以私信或者留言有问必答 ​
http://www.w-s-a.com/news/175136/

相关文章:

  • 网站建设里的知识长沙网络营销公司
  • 网站建设与维护大作业pc网站转换成微网站
  • php网站开发经典教材东莞网站开发
  • 教育培训手机网站模板下载跨境电商培训哪家最好
  • 网站开发淄博网站被降权会发生什么影响吗
  • 网站开发常用的语言东城手机网站制作
  • 微小店网站建设平台手机优化加速有什么用
  • 沈阳酒店企业网站制作公司竞价网站怎么做seo
  • 中山企业网站多少钱学网站建设的好处
  • 做官网网站哪家公司好jianux wordpress
  • 插件素材网站新站seo优化快速上排名
  • 网站注销主体填写原因asp响应式h5网站源码下载
  • 电商类网站模板下载济南市建设网官网
  • 万户网络做网站如何采集器wordpress
  • 襄阳网站建设企业查看 wordpress 插件
  • 网站地址申请京东联盟怎么做网站
  • 三亚市城乡建设局网站网站口碑营销
  • 图书租借网站 开发企业网站搜索优化外
  • 新乡个人网站建设哪家好免费的图片做视频在线观看网站
  • 洛阳工程建设信息网站山西响应式网页建设哪里好
  • 企业网站建设市场的另一面wordpress分类插件
  • 网站建设名头公司展厅装修
  • 小型购物网站开发费用郑州企业网站模板建站
  • 个体商户建自己的网站做销售建设积分兑换官方网站
  • 网站建设与维护培训网页制作专业用语
  • 建站特别慢wordpress网页制作与设计项目策划书
  • 视频制作素材免费网站头像制作在线生成器
  • 网站建设是不是可以免费建站广州做网站 信科网络
  • 闸北区网站设计叫别人做网站后怎么更改密码
  • 为什么想做网站运营建设工程教育网站