
Step 1: Introduction to EfficientDeRain

EfficientDeRain is an open-source project for single-image deraining. Proposed by a research team from Tsinghua University, it removes rain interference from an image and restores the underlying clean scene.

Core features:

  • Image deraining: EfficientDeRain learns pixel-wise dilated filtering to remove rain from an image and recover a clear result.
  • High efficiency: the project is designed with speed in mind and can process large numbers of images in a short time, which suits applications that need fast turnaround.
  • Extensibility: pre-trained models are provided for several datasets, and training on custom datasets is supported, so users can adapt the model to their own needs.

Step 2: EfficientDeRain network structure

The principle behind the algorithm is very simple. The key idea is to treat deraining as a per-pixel filtering problem on the image; filtering is a highly optimized operation, so its GPU implementation is necessarily fast.

A single figure in the paper (with parts (a) and (b)) is enough to understand the authors' idea: the image is passed through a deep convolutional network that learns a convolution kernel for every pixel, and convolving the original image with these kernels directly yields the derained output. Training requires pairs of rainy and rain-free images.

The authors point out that, although the idea itself is sound, the size of the per-pixel kernels matters. If the network only learns ordinary kernels, i.e. 3 x (3 x 3) parameters per pixel for the three channels (part (a) of the figure), it is hard to get satisfactory results on images with thick rain streaks. The predicted kernel is essentially a weighted filter that assigns high weights to surrounding non-rain pixels, so if the kernel does not reach any non-rain pixels the result is bound to be poor.

To cope with large rain streaks across scales, the authors made an improvement: the network predicts multi-scale dilated kernels instead. In part (b) of the figure, dilated kernels at four scales are predicted, the image is filtered at each scale, and the results are fused by weighting to obtain the final derained image.

The core of the algorithm can therefore be summarized as: learn multi-scale dilated kernels + weighted fusion of the filtered images. A minimal sketch of the per-pixel filtering operation is given below.
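To make the per-pixel filtering concrete, here is a minimal sketch written for this post. The function name pixelwise_dilated_filter, the toy random kernels, and the use of F.unfold are illustrative assumptions rather than code from the EfficientDeRain repository; the project itself implements the same operation in the KernelConv class shown in Step 3, with a separate kernel per colour channel.

import torch
import torch.nn.functional as F

def pixelwise_dilated_filter(img, kernels, K=3, rate=2):
    # img:     (B, C, H, W) rainy input
    # kernels: (B, K*K, H, W), one K x K filter per pixel (shared across channels here)
    B, C, H, W = img.shape
    pad = (K // 2) * rate
    # gather the K*K dilated neighbours of every pixel
    patches = F.unfold(img, kernel_size=K, dilation=rate, padding=pad)  # (B, C*K*K, H*W)
    patches = patches.view(B, C, K * K, H, W)
    weights = kernels.unsqueeze(1)            # broadcast the per-pixel kernel over channels
    return (patches * weights).sum(dim=2)     # (B, C, H, W) filtered image

rainy = torch.rand(1, 3, 64, 64)
kernels = torch.softmax(torch.rand(1, 9, 64, 64), dim=1)  # toy per-pixel 3x3 kernels
print(pixelwise_dilated_filter(rainy, kernels).shape)     # torch.Size([1, 3, 64, 64])

Increasing rate widens the reach of the same 3 x 3 kernel, which is exactly how the multi-scale variant gets past thick rain streaks without predicting more parameters per pixel.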
Step 3: Model code

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# ----------------------------------------
# Initialize the networks
# ----------------------------------------
def weights_init(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.
    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
    In our paper, we choose the default setting: zero mean Gaussian distribution with a standard deviation of 0.02
    """
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and classname.find('Conv') != -1:
            if init_type == 'normal':
                torch.nn.init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif classname.find('BatchNorm2d') != -1:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)

    # apply the initialization function init_func
    print('initialize network with %s type' % init_type)
    net.apply(init_func)

# ----------------------------------------
# Kernel Prediction Network (KPN)
# ----------------------------------------
class Basic(nn.Module):
    def __init__(self, in_ch, out_ch, g=16, channel_att=False, spatial_att=False):
        super(Basic, self).__init__()
        self.channel_att = channel_att
        self.spatial_att = spatial_att
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
            # nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
            # nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1),
            # nn.BatchNorm2d(out_ch),
            nn.ReLU()
        )
        if channel_att:
            self.att_c = nn.Sequential(
                nn.Conv2d(2 * out_ch, out_ch // g, 1, 1, 0),
                nn.ReLU(),
                nn.Conv2d(out_ch // g, out_ch, 1, 1, 0),
                nn.Sigmoid()
            )
        if spatial_att:
            self.att_s = nn.Sequential(
                nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3),
                nn.Sigmoid()
            )

    def forward(self, data):
        """
        Forward function.
        :param data:
        :return: tensor
        """
        fm = self.conv1(data)
        if self.channel_att:
            # fm_pool = F.adaptive_avg_pool2d(fm, (1, 1)) + F.adaptive_max_pool2d(fm, (1, 1))
            fm_pool = torch.cat([F.adaptive_avg_pool2d(fm, (1, 1)), F.adaptive_max_pool2d(fm, (1, 1))], dim=1)
            att = self.att_c(fm_pool)
            fm = fm * att
        if self.spatial_att:
            fm_pool = torch.cat([torch.mean(fm, dim=1, keepdim=True), torch.max(fm, dim=1, keepdim=True)[0]], dim=1)
            att = self.att_s(fm_pool)
            fm = fm * att
        return fm

class KPN(nn.Module):
    def __init__(self, color=True, burst_length=1, blind_est=True, kernel_size=[5], sep_conv=False,
                 channel_att=False, spatial_att=False, upMode='bilinear', core_bias=False):
        super(KPN, self).__init__()
        self.upMode = upMode
        self.burst_length = burst_length
        self.core_bias = core_bias
        self.color_channel = 3 if color else 1
        in_channel = (3 if color else 1) * (burst_length if blind_est else burst_length + 1)
        out_channel = (3 if color else 1) * (2 * sum(kernel_size) if sep_conv else np.sum(np.array(kernel_size) ** 2)) * burst_length
        if core_bias:
            out_channel += (3 if color else 1) * burst_length
        # definitions of the convolution layers
        # layers 2~5: average pooling followed by three convolutions
        self.conv1 = Basic(in_channel, 64, channel_att=False, spatial_att=False)
        self.conv2 = Basic(64, 128, channel_att=False, spatial_att=False)
        self.conv3 = Basic(128, 256, channel_att=False, spatial_att=False)
        self.conv4 = Basic(256, 512, channel_att=False, spatial_att=False)
        self.conv5 = Basic(512, 512, channel_att=False, spatial_att=False)
        # layers 6~8: upsample first, then convolve
        self.conv6 = Basic(512 + 512, 512, channel_att=channel_att, spatial_att=spatial_att)
        self.conv7 = Basic(256 + 512, 256, channel_att=channel_att, spatial_att=spatial_att)
        self.conv8 = Basic(256 + 128, out_channel, channel_att=channel_att, spatial_att=spatial_att)
        self.outc = nn.Conv2d(out_channel, out_channel, 1, 1, 0)
        self.kernel_pred = KernelConv(kernel_size, sep_conv, self.core_bias)
        self.conv_final = nn.Conv2d(in_channels=12, out_channels=3, kernel_size=3, stride=1, padding=1)

    # forward propagation
    def forward(self, data_with_est, data, white_level=1.0):
        """
        forward and obtain pred image directly
        :param data_with_est: if not blind estimation, it is same as data
        :param data:
        :return: pred_img_i and img_pred
        """
        conv1 = self.conv1(data_with_est)
        conv2 = self.conv2(F.avg_pool2d(conv1, kernel_size=2, stride=2))
        conv3 = self.conv3(F.avg_pool2d(conv2, kernel_size=2, stride=2))
        conv4 = self.conv4(F.avg_pool2d(conv3, kernel_size=2, stride=2))
        conv5 = self.conv5(F.avg_pool2d(conv4, kernel_size=2, stride=2))
        # start upsampling, with skip connections
        conv6 = self.conv6(torch.cat([conv4, F.interpolate(conv5, scale_factor=2, mode=self.upMode)], dim=1))
        conv7 = self.conv7(torch.cat([conv3, F.interpolate(conv6, scale_factor=2, mode=self.upMode)], dim=1))
        # print(conv7.size())
        conv8 = self.conv8(torch.cat([conv2, F.interpolate(conv7, scale_factor=2, mode=self.upMode)], dim=1))
        # return channel K*K*N
        core = self.outc(F.interpolate(conv8, scale_factor=2, mode=self.upMode))
        pred1 = self.kernel_pred(data, core, white_level, rate=1)
        pred2 = self.kernel_pred(data, core, white_level, rate=2)
        pred3 = self.kernel_pred(data, core, white_level, rate=3)
        pred4 = self.kernel_pred(data, core, white_level, rate=4)
        pred_cat = torch.cat([torch.cat([torch.cat([pred1, pred2], dim=1), pred3], dim=1), pred4], dim=1)
        pred = self.conv_final(pred_cat)
        # pred = self.kernel_pred(data, core, white_level, rate=1)
        return pred
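# Note added for this write-up (not part of the original file): with the default
# arguments color=True, burst_length=1, kernel_size=[5], sep_conv=False and
# core_bias=False, the channel arithmetic above works out to
#   in_channel  = 3                a single RGB rainy image
#   out_channel = 3 * 5**2 = 75    one 5x5 kernel per pixel and per colour channel
# and conv_final takes in_channels=12 because the same predicted core is applied
# at dilation rates 1..4, producing four 3-channel predictions that are
# concatenated before the final 3x3 fusion convolution.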
class KernelConv(nn.Module):
    """
    the class of computing prediction
    """
    def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
        super(KernelConv, self).__init__()
        self.kernel_size = sorted(kernel_size)
        self.sep_conv = sep_conv
        self.core_bias = core_bias

    def _sep_conv_core(self, core, batch_size, N, color, height, width):
        """
        convert the sep_conv core to conv2d core: 2p --> p^2
        :param core: shape: batch*(N*2*K)*height*width
        :return:
        """
        kernel_total = sum(self.kernel_size)
        core = core.view(batch_size, N, -1, color, height, width)
        if not self.core_bias:
            core_1, core_2 = torch.split(core, kernel_total, dim=2)
        else:
            core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
        # output core
        core_out = {}
        cur = 0
        for K in self.kernel_size:
            t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3, height, width)
            t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3, height, width)
            core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(batch_size, N, K * K, color, height, width)
            cur += K
        # it is a dict
        return core_out, None if not self.core_bias else core_3.squeeze()

    def _convert_dict(self, core, batch_size, N, color, height, width):
        """
        make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
        :param core: shape: batch_size*(N*K*K)*height*width
        :return: core_out, a dict
        """
        core_out = {}
        core = core.view(batch_size, N, -1, color, height, width)
        core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0] ** 2, ...]
        bias = None if not self.core_bias else core[:, :, -1, ...]
        return core_out, bias

    def forward(self, frames, core, white_level=1.0, rate=1):
        """
        compute the pred image according to core and frames
        :param frames: [batch_size, N, 3, height, width]
        :param core: [batch_size, N, dict(kernel), 3, height, width]
        :return:
        """
        if len(frames.size()) == 5:
            batch_size, N, color, height, width = frames.size()
        else:
            batch_size, N, height, width = frames.size()
            color = 1
            frames = frames.view(batch_size, N, color, height, width)
        if self.sep_conv:
            core, bias = self._sep_conv_core(core, batch_size, N, color, height, width)
        else:
            core, bias = self._convert_dict(core, batch_size, N, color, height, width)
        img_stack = []
        pred_img = []
        kernel = self.kernel_size[::-1]
        for index, K in enumerate(kernel):
            if not img_stack:
                padding_num = (K // 2) * rate
                frame_pad = F.pad(frames, [padding_num, padding_num, padding_num, padding_num])
                for i in range(0, K):
                    for j in range(0, K):
                        img_stack.append(frame_pad[..., i * rate:i * rate + height, j * rate:j * rate + width])
                img_stack = torch.stack(img_stack, dim=2)
            else:
                k_diff = (kernel[index - 1] - kernel[index]) // 2
                img_stack = img_stack[:, :, k_diff:-k_diff, ...]
            # print('img_stack:', img_stack.size())
            pred_img.append(torch.sum(core[K].mul(img_stack), dim=2, keepdim=False))
        pred_img = torch.stack(pred_img, dim=0)
        # print('pred_stack:', pred_img.size())
        pred_img_i = torch.mean(pred_img, dim=0, keepdim=False)
        # print('pred_img_i', pred_img_i.size())
        # N = 1
        pred_img_i = pred_img_i.squeeze(2)
        # print('pred_img_i', pred_img_i.size())
        # if bias is permitted
        if self.core_bias:
            if bias is None:
                raise ValueError('The bias should not be None.')
            pred_img_i += bias
        # print('white_level', white_level.size())
        pred_img_i = pred_img_i / white_level
        # pred_img = torch.mean(pred_img_i, dim=1, keepdim=True)
        # print('pred_img:', pred_img.size())
        # print('pred_img_i:', pred_img_i.size())
        return pred_img_i

class LossFunc(nn.Module):
    """
    loss function of KPN
    """
    def __init__(self, coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True, alpha=0.9998, beta=100):
        super(LossFunc, self).__init__()
        self.coeff_basic = coeff_basic
        self.coeff_anneal = coeff_anneal
        self.loss_basic = LossBasic(gradient_L1)
        self.loss_anneal = LossAnneal(alpha, beta)

    def forward(self, pred_img_i, pred_img, ground_truth, global_step):
        """
        forward function of loss_func
        :param frames: frame_1 ~ frame_N, shape: [batch, N, 3, height, width]
        :param core: a dict converted by ......
        :param ground_truth: shape [batch, 3, height, width]
        :param global_step: int
        :return: loss
        """
        return self.coeff_basic * self.loss_basic(pred_img, ground_truth), self.coeff_anneal * self.loss_anneal(global_step, pred_img_i, ground_truth)

class LossBasic(nn.Module):
    """
    Basic loss function.
    """
    def __init__(self, gradient_L1=True):
        super(LossBasic, self).__init__()
        self.l1_loss = nn.L1Loss()
        self.l2_loss = nn.MSELoss()
        self.gradient = TensorGradient(gradient_L1)

    def forward(self, pred, ground_truth):
        return self.l2_loss(pred, ground_truth) + \
               self.l1_loss(self.gradient(pred), self.gradient(ground_truth))

class LossAnneal(nn.Module):
    """
    anneal loss function
    """
    def __init__(self, alpha=0.9998, beta=100):
        super(LossAnneal, self).__init__()
        self.global_step = 0
        self.loss_func = LossBasic(gradient_L1=True)
        self.alpha = alpha
        self.beta = beta

    def forward(self, global_step, pred_i, ground_truth):
        """
        :param global_step: int
        :param pred_i: [batch_size, N, 3, height, width]
        :param ground_truth: [batch_size, 3, height, width]
        :return:
        """
        loss = 0
        for i in range(pred_i.size(1)):
            loss += self.loss_func(pred_i[:, i, ...], ground_truth)
        loss /= pred_i.size(1)
        return self.beta * self.alpha ** global_step * loss

class TensorGradient(nn.Module):
    """
    the gradient of tensor
    """
    def __init__(self, L1=True):
        super(TensorGradient, self).__init__()
        self.L1 = L1

    def forward(self, img):
        w, h = img.size(-2), img.size(-1)
        l = F.pad(img, [1, 0, 0, 0])
        r = F.pad(img, [0, 1, 0, 0])
        u = F.pad(img, [0, 0, 1, 0])
        d = F.pad(img, [0, 0, 0, 1])
        if self.L1:
            return torch.abs((l - r)[..., 0:w, 0:h]) + torch.abs((u - d)[..., 0:w, 0:h])
        else:
            return torch.sqrt(torch.pow((l - r)[..., 0:w, 0:h], 2) + torch.pow((u - d)[..., 0:w, 0:h], 2))

if __name__ == '__main__':
    kpn = KPN().cuda()
    a = torch.randn(4, 3, 224, 224).cuda()
    b = kpn(a, a)
    print(b.shape)

Step 4: Run
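The __main__ block above only performs a shape check with random input. Below is a minimal sketch of how a trained model might be applied to a real rainy photo, assuming the KPN class defined above is available in the same script; the checkpoint name kpn_derain.pth, the image file names, and the torchvision-based I/O are assumptions made for illustration and are not taken from the original post or repository.

import torch
from PIL import Image
from torchvision import transforms
from torchvision.utils import save_image

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = KPN().to(device).eval()
# hypothetical checkpoint; substitute whatever weights you trained or downloaded
model.load_state_dict(torch.load('kpn_derain.pth', map_location=device))

# height and width should be multiples of 16, because the encoder pools four times
# and the decoder's skip connections expect the sizes to match after upsampling
rainy = transforms.ToTensor()(Image.open('rainy.png').convert('RGB')).unsqueeze(0).to(device)
with torch.no_grad():
    derained = model(rainy, rainy)  # blind estimation: both arguments are the rainy image
save_image(derained.clamp(0, 1), 'derained.png')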
Step 5: Contents of the full project

The complete project files can be downloaded via the link given in the description of the demo and introduction video: PyTorch框架——基于深度学习EfficientDeRain神经网络AI去雨滴图像增强系统_哔哩哔哩_bilibili
