【残差网络ResNet：残差块输入输出形状控制】

1 残差块输入输出形状控制程序
2 查看经典的ResNet18模型

1 残差块输入输出形状控制程序

参考链接：https://arxiv.org/pdf/1512.03385.pdf

这是一个基本的残差块：主路径由两层卷积组成前向传播；另一路（shortcut）由一层卷积和批归一化组成。为了与主路径输出的形状一致，这一层卷积和批归一化用来控制输出的形状，最终两路相加，形成与前向传播一致的新输出。
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResNetBasicBlock(nn.Module):
    """Basic residual block (He et al., https://arxiv.org/pdf/1512.03385.pdf).

    Main path: two 3x3 conv + BatchNorm layers.
    Shortcut path: a 1x1 conv + BatchNorm that projects the input to
    ``out_channels`` (and downsamples by ``stride``) so its shape matches
    the main-path output; the two are summed and passed through ReLU.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        stride: spatial stride of the block (1 keeps H/W, 2 halves them).
    """

    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # BUG FIX: the second conv must use stride=1. With stride=stride the
        # main path was downsampled twice while the shortcut was downsampled
        # only once, so `out + res` raised a shape error for any stride > 1.
        # (stride == 1, the case tested in this article, is unaffected.)
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # 1x1 projection shortcut (option B in the paper) — matches channel
        # count and spatial size of the main path.
        self.residual = nn.Conv2d(in_channels, out_channels,
                                  kernel_size=1, stride=stride, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu(self.bn1(out), inplace=True)
        out = self.conv2(out)
        out = self.bn2(out)
        res = self.residual(x)
        res = self.bn3(res)
        out = out + res  # shortcut connection
        return F.relu(out)


# Test code follows
# Shape check: push a batch of 8 three-channel images through one block.
# NOTE(review): width 244 (not 224) is consistent with the printed output
# below, but it is probably a typo for 224 in the original article.
imgs_batch = torch.randn((8, 3, 224, 244))
resnet_block = ResNetBasicBlock(3, 16, 1)  # stride 1: spatial size preserved
pred_batch = resnet_block(imgs_batch)
print(pred_batch.shape)
# Output follows
torch.Size([8, 16, 224, 244])

使用tensorboard观察结构图的代码：
from torch.utils.tensorboard import SummaryWriter

# Log the block's computation graph so it can be inspected in TensorBoard.
writer = SummaryWriter("my_log/ResNetBasicBlock")
writer.add_graph(resnet_block, imgs_batch)
writer.close()  # flush the event file, otherwise the graph may not appear
# In a terminal run: tensorboard --logdir path --host=127.0.0.1
# where `path` is the absolute log directory (no quotes), then open the
# URL that TensorBoard prints.

# 2 Inspect the classic ResNet18 model
# Instantiate ResNet18 with random weights and print its layer structure.
# NOTE(review): `pretrained` is deprecated in newer torchvision; the modern
# equivalent is torchvision.models.resnet18(weights=None).
resnet_model = torchvision.models.resnet18(pretrained=False)
print(resnet_model)
# Output follows
ResNet(
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace=True)
  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (layer1): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (1): BasicBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (layer2): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (downsample): Sequential(
        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): BasicBlock(
      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (layer3): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (downsample): Sequential(
        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): BasicBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (layer4): Sequential(
    (0): BasicBlock(
      (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (downsample): Sequential(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): BasicBlock(
      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
  (fc): Linear(in_features=512, out_features=1000, bias=True)
)