PyTorch 模型构建
1、GPU配置 2、数据预处理 3、划分训练集、验证集、测试集 4、选择模型 5、设定损失函数与优化方法 6、模型效果评估
#导入常用包
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transfor…PyTorch 模型构建
1、GPU配置2、数据预处理3、划分训练集、验证集、测试集4、选择模型5、设定损失函数优化方法6、模型效果评估
#导入常用包
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms#超参数定义
# ---- Hyperparameter definitions ----
# Batch size (common alternatives: 32, 64, 128)
batch_size = 16
# Optimizer learning rate
lr = 1e-4
# Number of training epochs
max_epochs = 10

# Option 1: restrict which GPUs are visible via environment variable
# (here GPUs 0 and 1). Must be set before CUDA is initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# Option 2: create a `device` object and move tensors/models with
# `.to(device)`; falls back to CPU when CUDA is unavailable.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# ---- Data loading: CIFAR-10 as an example of building a Dataset ----
from torchvision import datasets

# `data_transform` applies per-sample transforms (flips, crops,
# normalization, ... can be customized). Here: tensor conversion plus
# per-channel normalization to roughly [-1, 1].
data_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# download=False assumes the CIFAR-10 files already exist under ./cifar10
train_cifar_dataset = datasets.CIFAR10(
    "cifar10", train=True, download=False, transform=data_transform)
test_cifar_dataset = datasets.CIFAR10(
    "cifar10", train=False, download=False, transform=data_transform)

# With the Datasets built, DataLoaders read the data in batches.
train_loader = torch.utils.data.DataLoader(
    train_cifar_dataset, batch_size=batch_size, num_workers=4,
    shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(
    test_cifar_dataset, batch_size=batch_size, num_workers=4,
    shuffle=False)

# Sanity check — each image is a 3x32x32 tensor:
# train_cifar_dataset[1][0].size()  ->  torch.Size([3, 32, 32])

# ---- Model definition ----
# ---- Method 1: pretrained model ----
import torchvision

# The deprecation warnings below show torchvision >= 0.13; use the
# `weights` enum instead of the deprecated `pretrained=True`.
Resnet50 = torchvision.models.resnet50(
    weights=torchvision.models.ResNet50_Weights.IMAGENET1K_V1)
# BUG fix: the original assigned `Resnet50.fc.out_features = 10`, which
# only mutates an attribute — the Linear layer keeps its 2048x1000 weight
# matrix and still outputs 1000 logits. Replace the head so the
# classifier really produces 10 classes.
Resnet50.fc = torch.nn.Linear(Resnet50.fc.in_features, 10)
print(Resnet50)
D:\Users\xulele\Anaconda3\lib\site-packages\torchvision\models\_utils.py:208: UserWarning: The parameter pretrained is deprecated since 0.13 and may be removed in the future, please use weights instead.warnings.warn(
D:\Users\xulele\Anaconda3\lib\site-packages\torchvision\models\_utils.py:223: UserWarning: Arguments other than a weight enum or None for weights are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing weightsResNet50_Weights.IMAGENET1K_V1. You can also use weightsResNet50_Weights.DEFAULT to get the most up-to-date weights.warnings.warn(msg)ResNet((conv1): Conv2d(3, 64, kernel_size(7, 7), stride(2, 2), padding(3, 3), biasFalse)(bn1): BatchNorm2d(64, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)(maxpool): MaxPool2d(kernel_size3, stride2, padding1, dilation1, ceil_modeFalse)(layer1): Sequential((0): Bottleneck((conv1): Conv2d(64, 64, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(64, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(64, 64, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(64, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(64, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)(downsample): Sequential((0): Conv2d(64, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(1): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)))(1): Bottleneck((conv1): Conv2d(256, 64, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(64, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(64, 64, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(64, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(64, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(2): Bottleneck((conv1): Conv2d(256, 64, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(64, eps1e-05, 
momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(64, 64, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(64, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(64, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)))(layer2): Sequential((0): Bottleneck((conv1): Conv2d(256, 128, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(128, 128, kernel_size(3, 3), stride(2, 2), padding(1, 1), biasFalse)(bn2): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(128, 512, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)(downsample): Sequential((0): Conv2d(256, 512, kernel_size(1, 1), stride(2, 2), biasFalse)(1): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)))(1): Bottleneck((conv1): Conv2d(512, 128, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(128, 128, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(128, 512, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(2): Bottleneck((conv1): Conv2d(512, 128, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(128, 128, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(128, 512, kernel_size(1, 1), stride(1, 
1), biasFalse)(bn3): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(3): Bottleneck((conv1): Conv2d(512, 128, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(128, 128, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(128, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(128, 512, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)))(layer3): Sequential((0): Bottleneck((conv1): Conv2d(512, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(256, 256, kernel_size(3, 3), stride(2, 2), padding(1, 1), biasFalse)(bn2): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(256, 1024, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)(downsample): Sequential((0): Conv2d(512, 1024, kernel_size(1, 1), stride(2, 2), biasFalse)(1): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)))(1): Bottleneck((conv1): Conv2d(1024, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(256, 256, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(256, 1024, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(2): Bottleneck((conv1): Conv2d(1024, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(256, eps1e-05, momentum0.1, 
affineTrue, track_running_statsTrue)(conv2): Conv2d(256, 256, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(256, 1024, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(3): Bottleneck((conv1): Conv2d(1024, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(256, 256, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(256, 1024, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(4): Bottleneck((conv1): Conv2d(1024, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(256, 256, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(256, 1024, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(5): Bottleneck((conv1): Conv2d(1024, 256, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(256, 256, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(256, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(256, 1024, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(1024, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)))(layer4): Sequential((0): Bottleneck((conv1): Conv2d(1024, 512, 
kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(512, 512, kernel_size(3, 3), stride(2, 2), padding(1, 1), biasFalse)(bn2): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(512, 2048, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(2048, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)(downsample): Sequential((0): Conv2d(1024, 2048, kernel_size(1, 1), stride(2, 2), biasFalse)(1): BatchNorm2d(2048, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)))(1): Bottleneck((conv1): Conv2d(2048, 512, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(512, 512, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(512, 2048, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(2048, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue))(2): Bottleneck((conv1): Conv2d(2048, 512, kernel_size(1, 1), stride(1, 1), biasFalse)(bn1): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv2): Conv2d(512, 512, kernel_size(3, 3), stride(1, 1), padding(1, 1), biasFalse)(bn2): BatchNorm2d(512, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(conv3): Conv2d(512, 2048, kernel_size(1, 1), stride(1, 1), biasFalse)(bn3): BatchNorm2d(2048, eps1e-05, momentum0.1, affineTrue, track_running_statsTrue)(relu): ReLU(inplaceTrue)))(avgpool): AdaptiveAvgPool2d(output_size(1, 1))(fc): Linear(in_features2048, out_features10, biasTrue)
# ---- Training / validation: loss function and optimizer ----
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Loss: cross entropy (expects raw logits; reduction defaults to 'mean')
criterion = torch.nn.CrossEntropyLoss()
# Optimizer over the (head-replaced) ResNet-50 parameters
optimizer = torch.optim.Adam(Resnet50.parameters(), lr=lr)
epoch = max_epochs
Resnet50 = Resnet50.to(device)
total_step = len(train_loader)
train_all_loss = []
# BUG fix: the original declared `val_all_loss` but appended to
# `test_all_loss`, which would raise NameError at the end of epoch 1.
test_all_loss = []

for i in range(epoch):
    Resnet50.train()
    train_total_loss = 0
    train_total_num = 0
    train_total_correct = 0
    for iter, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        outputs = Resnet50(images)
        loss = criterion(outputs, labels)
        train_total_correct += (outputs.argmax(1) == labels).sum().item()
        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_total_num += labels.shape[0]
        train_total_loss += loss.item()
        # NOTE(review): loss.item() is already a per-sample mean (default
        # reduction), so dividing by batch size again understates the
        # per-iteration loss — kept for parity with the epoch summary.
        print("Epoch [{}/{}], Iter [{}/{}], train_loss:{:.4f}".format(
            i + 1, epoch, iter + 1, total_step,
            loss.item() / labels.shape[0]))

    # Evaluation: no gradients needed, saves memory and compute.
    Resnet50.eval()
    test_total_loss = 0
    test_total_correct = 0
    test_total_num = 0
    with torch.no_grad():
        for iter, (images, labels) in enumerate(test_loader):
            images = images.to(device)
            labels = labels.to(device)
            outputs = Resnet50(images)
            loss = criterion(outputs, labels)
            test_total_correct += (outputs.argmax(1) == labels).sum().item()
            test_total_loss += loss.item()
            test_total_num += labels.shape[0]

    print("Epoch [{}/{}], train_loss:{:.4f}, train_acc:{:.4f}%, "
          "test_loss:{:.4f}, test_acc:{:.4f}%".format(
              i + 1, epoch,
              train_total_loss / train_total_num,
              train_total_correct / train_total_num * 100,
              test_total_loss / test_total_num,
              test_total_correct / test_total_num * 100))
    train_all_loss.append(np.round(train_total_loss / train_total_num, 4))
    test_all_loss.append(np.round(test_total_loss / test_total_num, 4))
# ---- Method 2: custom model ----
# BUG fix: the original referenced `nn` and `F` without importing them.
import torch.nn as nn
import torch.nn.functional as F


class DemoModel(nn.Module):
    """LeNet-style CNN: 3x32x32 input (e.g. CIFAR-10) -> 10 class logits."""

    def __init__(self):
        super(DemoModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)     # 3x32x32 -> 6x28x28
        self.pool = nn.MaxPool2d(2, 2)      # halves spatial dims
        self.conv2 = nn.Conv2d(6, 16, 5)    # 6x14x14 -> 16x10x10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)          # flatten to (batch, 400)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)                     # raw logits, no softmax
        return x


# ---- Training / validation: loss function and optimizer ----
# ---- Train / validate the custom model ----
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Cross-entropy loss (raw logits in, class indices as targets)
criterion = torch.nn.CrossEntropyLoss()
epoch = max_epochs
My_model = DemoModel()
My_model = My_model.to(device)
# BUG fix: the original built the optimizer over Resnet50.parameters(),
# so My_model's weights would never be updated. Optimize My_model instead
# (and only after it has been created/moved to the device).
optimizer = torch.optim.Adam(My_model.parameters(), lr=lr)
total_step = len(train_loader)
train_all_loss = []
# BUG fix: the original declared `val_all_loss` but appended to
# `test_all_loss` (NameError at the end of epoch 1).
test_all_loss = []

for i in range(epoch):
    My_model.train()
    train_total_loss = 0
    train_total_num = 0
    train_total_correct = 0
    for iter, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        outputs = My_model(images)
        loss = criterion(outputs, labels)
        train_total_correct += (outputs.argmax(1) == labels).sum().item()
        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_total_num += labels.shape[0]
        train_total_loss += loss.item()
        print("Epoch [{}/{}], Iter [{}/{}], train_loss:{:.4f}".format(
            i + 1, epoch, iter + 1, total_step,
            loss.item() / labels.shape[0]))

    # Evaluation: disable gradient tracking.
    My_model.eval()
    test_total_loss = 0
    test_total_correct = 0
    test_total_num = 0
    with torch.no_grad():
        for iter, (images, labels) in enumerate(test_loader):
            images = images.to(device)
            labels = labels.to(device)
            outputs = My_model(images)
            loss = criterion(outputs, labels)
            test_total_correct += (outputs.argmax(1) == labels).sum().item()
            test_total_loss += loss.item()
            test_total_num += labels.shape[0]

    print("Epoch [{}/{}], train_loss:{:.4f}, train_acc:{:.4f}%, "
          "test_loss:{:.4f}, test_acc:{:.4f}%".format(
              i + 1, epoch,
              train_total_loss / train_total_num,
              train_total_correct / train_total_num * 100,
              test_total_loss / test_total_num,
              test_total_correct / test_total_num * 100))
    train_all_loss.append(np.round(train_total_loss / train_total_num, 4))
    test_all_loss.append(np.round(test_total_loss / test_total_num, 4))