Contents
I. Knowledge Points
II. Code
III. Viewing the Feature Maps of the Convolutional Layers
1. Inspecting each layer
2. show_featureMap.py

Background: LeNet-5 is a classic CNN proposed by Yann LeCun in 1998 to solve the handwritten digit recognition problem.

I. Knowledge Points

1. iter() / next()

iter(): returns an iterator over a sequence.
next(): fetches the next item from that iterator.

data = [1, 2, 3]
data_iter = iter(data)
print(next(data_iter)) # 1
print(next(data_iter)) # 2
print(next(data_iter))  # 3

2. enumerate()

enumerate(sequence, [start=0]) combines an iterable into an indexed sequence, yielding each index together with its value; it is typically used in a for loop.
start: the value the index starts from.

data = ['zs', 'ls', 'ww']
print(list(enumerate(data)))
# [(0, 'zs'), (1, 'ls'), (2, 'ww')]

3. torch.no_grad()

Inside this context manager, every tensor produced by a computation has requires_grad set to False automatically. With requires_grad=False, no gradients are computed during backpropagation, which saves memory.
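A minimal sketch (my addition, not from the original post) showing the flag being switched off inside the context manager:

import torch

x = torch.ones(2, 2, requires_grad=True)
y = x * 2
print(y.requires_grad)      # True
with torch.no_grad():
    z = x * 2
    print(z.requires_grad)  # False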
4. torch.max(input, dim)

input: a tensor.
dim=0: compares along the rows (returns the maximum of each column); dim=1: compares along the columns (returns the maximum of each row).

import torch

data = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
x = torch.max(data, dim=0)
print(x)
# values=tensor([7., 8., 9.]),
# indices=tensor([2, 2, 2])
x = torch.max(data, dim=1)
print(x)
# values=tensor([3., 6., 9.]),
# indices=tensor([2, 2, 2])
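A small follow-up (my addition): train.py below keeps only the index part of this result, which is exactly what the [1] in torch.max(outputs, dim=1)[1] selects:

idx = torch.max(data, dim=1)[1]  # keep only the indices of the row maxima
print(idx)                       # tensor([2, 2, 2])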
5. torch.eq()

torch.eq() compares two tensors element by element: it returns True where the elements at the same position are equal and False otherwise.
Note: item() returns a plain Python number.

import torch

data1 = torch.tensor([1, 2, 3, 4, 5])
data2 = torch.tensor([2, 3, 3, 9, 5])
x = torch.eq(data1, data2)
print(x) # tensor([False, False, True, False, True])
sum = torch.eq(data1, data2).sum()
print(sum) # tensor(2)
sum_item = torch.eq(data1, data2).sum().item()
print(sum_item)  # 2

6. squeeze(input, dim)

squeeze(0): removes the first dimension if its size is 1.
squeeze(1): removes the second dimension if its size is 1.
squeeze(-1): removes the last dimension if its size is 1.

7. unsqueeze(input, dim)

Adds a dimension of size 1: it returns a new tensor with a dimension of size 1 inserted at the specified position; the dimension must be given explicitly.
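A short sketch (my addition, not from the original post) illustrating both calls:

import torch

t = torch.zeros(1, 3, 1)
print(t.squeeze(0).shape)    # torch.Size([3, 1])    dim 0 has size 1 and is removed
print(t.squeeze(1).shape)    # torch.Size([1, 3, 1]) dim 1 has size 3, nothing changes
print(t.squeeze(-1).shape)   # torch.Size([1, 3])    last dim has size 1 and is removed

t = torch.tensor([1, 2, 3])
print(t.unsqueeze(0).shape)  # torch.Size([1, 3])
print(t.unsqueeze(1).shape)  # torch.Size([3, 1])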
II. Code

model.py
import torch.nn as nn
import torch.nn.functional as F


class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 5)       # output (16, 28, 28)
        self.pool1 = nn.MaxPool2d(2, 2)        # output (16, 14, 14)
        self.conv2 = nn.Conv2d(16, 32, 5)      # output (32, 10, 10)
        self.pool2 = nn.MaxPool2d(2, 2)        # output (32, 5, 5)
        self.fc1 = nn.Linear(32 * 5 * 5, 120)  # output: 120
        self.fc2 = nn.Linear(120, 84)          # output: 84
        self.fc3 = nn.Linear(84, 10)           # output: 10

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = x.view(-1, 32 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
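As a quick sanity check (my addition, not part of the original files), a random CIFAR-10-sized batch can be pushed through the network to confirm the output shape:

import torch
from model import LeNet

net = LeNet()
x = torch.rand(4, 3, 32, 32)  # a fake batch: N=4, C=3, H=32, W=32
print(net(x).shape)           # torch.Size([4, 10])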
train.py

import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms

from model import LeNet


def main():
    # preprocess data
    transform = transforms.Compose([
        # Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255]
        # to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
        transforms.ToTensor(),
        # (mean[1], ..., mean[n]) and std: (std[1], ..., std[n])
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # training set; if the dataset is already downloaded, keep download=False
    train_data = torchvision.datasets.CIFAR10('./data', train=True, transform=transform, download=False)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=36, shuffle=True, num_workers=0)

    # validation set
    val_data = torchvision.datasets.CIFAR10('./data', train=False, download=False, transform=transform)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=10000, shuffle=False, num_workers=0)

    # get an iterator over the validation loader
    val_data_iter = iter(val_loader)
    val_image, val_label = next(val_data_iter)

    net = LeNet()
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    # loop over the dataset multiple times
    for epoch in range(5):
        epoch_loss = 0
        for step, data in enumerate(train_loader, start=0):
            # get the inputs from train_loader; data is a list of [inputs, labels]
            inputs, labels = data

            # gradients must not accumulate across batches, so call zero_grad()
            # once per batch to reset the parameter gradients to 0
            optimizer.zero_grad()
            # 1. forward
            outputs = net(inputs)
            # 2. loss
            loss = loss_function(outputs, labels)
            # 3. backpropagation
            loss.backward()
            # 4. update the parameters with the optimizer
            optimizer.step()

            # print statistics
            # item() extracts the element as a Python number with full precision
            epoch_loss += loss.item()
            # print every 500 mini-batches
            if step % 500 == 499:
                with torch.no_grad():
                    outputs = net(val_image)
                    # [0] is the max value of each row, [1] is the index of that max value
                    predict_y = torch.max(outputs, dim=1)[1]
                    val_accuracy = torch.eq(predict_y, val_label).sum().item() / val_label.size(0)
                    print('[epoch%d step%5d] train_loss:%.3f test_accuracy:%.3f' %
                          (epoch + 1, step + 1, epoch_loss / 500, val_accuracy))
                    epoch_loss = 0

    print('Train finished!')
    save_path = './model/LeNet.pth'
    torch.save(net.state_dict(), save_path)


if __name__ == '__main__':
    main()
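A side note on the Normalize transform used above (the sketch is my addition, not from the original post): with mean 0.5 and std 0.5 per channel, output = (input - 0.5) / 0.5, so the [0, 1] range produced by ToTensor() is mapped to [-1, 1]:

import torch
import torchvision.transforms as transforms

norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
black = torch.zeros(3, 2, 2)     # an all-black image after ToTensor()
white = torch.ones(3, 2, 2)      # an all-white image after ToTensor()
print(norm(black).min().item())  # -1.0
print(norm(white).max().item())  # 1.0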
predict.py

import torch
import torchvision.transforms as transforms
from PIL import Image
from model import LeNet


def main():
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),  # CHW format
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

    net = LeNet()
    net.load_state_dict(torch.load('./model/LeNet.pth'))

    image = Image.open('./predict/2.png')  # HWC format
    image = transform(image)
    image = torch.unsqueeze(image, dim=0)  # add a dimension at position 0: [N, C, H, W], N = batch size

    with torch.no_grad():
        outputs = net(image)
        predict = torch.max(outputs, dim=1)[1]
    print(classes[predict])


if __name__ == '__main__':
    main()

[image: 2.png, the test image used for prediction]
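An optional extension (my addition, not in the original script): torch.softmax can turn the raw logits into class probabilities before taking the argmax. Assuming the same net, image, and classes defined inside main() of predict.py, the with-block could be replaced by:

    with torch.no_grad():
        outputs = net(image)
        probabilities = torch.softmax(outputs, dim=1)  # per-class probabilities
        predict = torch.argmax(probabilities, dim=1)
    print(classes[predict], probabilities[0][predict].item())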
III. Viewing the Feature Maps of the Convolutional Layers

1. Inspecting each layer

for i in net.children():
    print(i)
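For the LeNet defined above, the printed children should look roughly like the following (the exact repr text may differ slightly across PyTorch versions):

Conv2d(3, 16, kernel_size=(5, 5), stride=(1, 1))
MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1))
MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
Linear(in_features=800, out_features=120, bias=True)
Linear(in_features=120, out_features=84, bias=True)
Linear(in_features=84, out_features=10, bias=True)

This is why show_featureMap.py below keeps only the children of type nn.Conv2d.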
2. show_featureMap.py
import torch
import torch.nn as nn
from model import LeNet
import torchvision
import torchvision.transforms as transforms
from PIL import Image
import matplotlib.pyplot as plt


def main():
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),  # CHW format
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    image = Image.open('./predict/2.png')  # HWC format
    image = transform(image)
    image = torch.unsqueeze(image, dim=0)  # add a dimension at position 0: [N, C, H, W], N = batch size

    net = LeNet()
    net.load_state_dict(torch.load('./model/LeNet.pth'))

    conv_weights = []  # weights of the convolutional layers
    conv_layers = []   # the convolutional layers themselves
    counter = 0        # number of convolutional layers in the model

    # 1. put the convolutional layers and their weights into lists
    model_children = list(net.children())
    for i in range(len(model_children)):
        if type(model_children[i]) == nn.Conv2d:
            counter += 1
            conv_weights.append(model_children[i].weight)
            conv_layers.append(model_children[i])

    outputs = []
    names = []
    for layer in conv_layers[0:]:
        # 2. run the image through each convolutional layer
        image = layer(image)
        outputs.append(image)
        names.append(str(layer))

    # 3. dimension handling
    print(outputs[0].shape)             # torch.Size([1, 16, 28, 28])  1-batch 16-channel 28-H 28-W
    print(outputs[0].squeeze(0).shape)  # torch.Size([16, 28, 28])  remove dimension 0
    # sum the 16-channel feature map into a single 28x28 feature map
    # (sum maps all grayscale maps onto one image)
    print(torch.sum(outputs[0].squeeze(0), 0).shape)  # torch.Size([28, 28])

    processed_data = []
    for feature_map in outputs:
        feature_map = feature_map.squeeze(0)    # torch.Size([16, 28, 28])
        gray_scale = torch.sum(feature_map, 0)  # torch.Size([28, 28])
        # average over all channel maps
        gray_scale = gray_scale / feature_map.shape[0]
        processed_data.append(gray_scale.data.numpy())

    # 4. visualize the feature maps
    figure = plt.figure()
    for i in range(len(processed_data)):
        x = figure.add_subplot(1, 2, i + 1)
        x.imshow(processed_data[i])
        x.set_title(names[i].split('(')[0])
    plt.show()


if __name__ == '__main__':
    main()
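An optional extension (my addition, assuming the same variables inside main() of show_featureMap.py): instead of averaging the channels into one grayscale map, the 16 channels of the first conv layer's output can be displayed individually:

    # show the 16 channels of the first conv layer's output in a 4x4 grid
    first_layer_maps = outputs[0].squeeze(0)  # torch.Size([16, 28, 28])
    grid = plt.figure(figsize=(8, 8))
    for i in range(first_layer_maps.shape[0]):
        ax = grid.add_subplot(4, 4, i + 1)
        ax.imshow(first_layer_maps[i].detach().numpy())
        ax.axis('off')
    plt.show()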