b站小土堆pytorch教程学习笔记
一、从零开始构建自己的神经网络
1.模型构建
# train.py -- train a CNN on CIFAR10 (xiaotudui PyTorch tutorial).
# The network itself (class Han) lives in model.py and is imported below.

# Prepare the datasets.
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model import *

train_data = torchvision.datasets.CIFAR10(
    "dataset", train=True,
    transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(
    "dataset", train=False,
    transform=torchvision.transforms.ToTensor(), download=True)

# Inspect the sizes of the training and test sets.
train_data_size = len(train_data)   # 50000
test_data_size = len(test_data)     # 10000
print("训练数据集长度为{}".format(train_data_size))
print("测试数据集长度为{}".format(test_data_size))

# Wrap the datasets in DataLoaders.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# The network is built in model.py; instantiate it here.
han = Han()

# Loss function.
loss_fn = nn.CrossEntropyLoss()

# Optimizer.
# learning_rate = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(han.parameters(), lr=learning_rate)

# Training bookkeeping.
total_train_step = 0   # number of training steps taken
total_test_step = 0    # number of evaluation passes completed
epoch = 10             # number of training epochs

# TensorBoard logging.
writer = SummaryWriter("logs/train")

for i in range(epoch):
    print("-------第{}轮训练开始-------".format(i + 1))
    for data in train_dataloader:
        imgs, target = data
        output = han(imgs)
        loss = loss_fn(output, target)
        # Optimize the model.
        optimizer.zero_grad()   # clear stale gradients
        loss.backward()         # back-propagate
        optimizer.step()        # update parameters
        total_train_step += 1
        # FIX: the scraped code read `if total_train_step % 1000:`, which is
        # truthy for every step NOT divisible by 1000. Compare against 0 and
        # use 100 to match the original intent ("print every 100 steps").
        if total_train_step % 100 == 0:
            # loss.item() extracts the Python number from the tensor.
            print("训练次数{},loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # After each epoch, evaluate on the test set.
    total_test_loss = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, target = data
            output = han(imgs)
            loss = loss_fn(output, target)
            total_test_loss += loss.item()
    print("所有测试集上的损失:{}".format(total_test_loss))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    total_test_step += 1

    # Save a checkpoint of the whole model after each epoch.
    torch.save(han, "han_{}.pth".format(i))
    print("模型已保存")

writer.close()
from torch import nnclass Han(nn.Module):def __init__(self):super(Han, self).__init__()self.model nn.Sequential(nn.Conv2d(3, 32, kernel_size5, stride1, padding2),nn.MaxPool2d(2),nn.Conv2d(32, 32, kernel_size5, stride1, padding2),nn.MaxPool2d(2),nn.Conv2d(32, 64, kernel_size5, stride1, padding2),nn.MaxPool2d(2),nn.Flatten(),nn.Linear(64 * 4 * 4, 64),nn.Linear(64, 10))def forward(self, x):x self.model(x)return xif __name__ __main__:hanHan()inputtorch.ones(64,3,32,32)outputhan(input)print(output.shape)#torch.Size([64, 10])10表示十个类别输出概率结果如下
2.使用argmax计算整体正确率
#每训练完一轮将在测试集上跑一遍评估其训练效果total_test_loss0total_acc0with torch.no_grad():for data in test_dataloader:imgs,targetdataoutputhan(imgs)lossloss_fn(output,target)total_test_losstotal_test_lossloss.item()acc(output.argmax(1)target).sum()#(1)横着看total_accaccprint(所有测试集上的损失:{}.format(total_test_loss))print(整体测试集上的正确率{}.format(total_acc/test_data_size))writer.add_scalar(test_loss,total_test_loss,total_test_step)writer.add_scalar(test_acc, total_acc/test_data_size, total_test_step)total_test_step1整体测试集上的正确率0.27480000257492065
3.当模型中存在 Dropout 层或 BatchNorm 层时,需要分别在训练和测试前加入:
# Dropout and BatchNorm layers behave differently at train vs. eval time,
# so the mode must be switched explicitly.

# Before training:
han.train()

# Before testing/evaluation:
han.eval()

# 二、使用GPU (Section 2: using the GPU)
需要对网络模型、数据输入、标注(target)、损失函数调用 .cuda():
1.方式1
# Method 1: move model, loss function, and data to the GPU with .cuda().
# FIX: the original guarded only the model with torch.cuda.is_available();
# the unguarded loss_fn.cuda()/imgs.cuda() calls would crash on CPU-only
# machines, so every .cuda() call is guarded here.

# Model
if torch.cuda.is_available():
    han = han.cuda()

# Loss function
loss_fn = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()

# Inside the dataloader loop, move each batch as well:
imgs, target = data
if torch.cuda.is_available():
    imgs = imgs.cuda()
    target = target.cuda()

# 2.方式2 (Method 2)
# Method 2: define the training device once and move everything with .to().
# FIX: a bare torch.device("cuda") raises on machines without a GPU; fall
# back to the CPU when CUDA is unavailable.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
han = han.to(device)
imgs = imgs.to(device)
target = target.to(device)