当前位置: 首页 > news >正文

怎么做手机网站服务器网站绑定域名

"""Train a small CNN classifier on FashionMNIST.

FashionMNIST (Zalando Research) is a drop-in replacement for the classic
handwritten-digit MNIST: 10 clothing classes (t-shirt/top, trouser, pullover,
dress, coat, sandal, shirt, sneaker, bag, ankle boot), 6,000 training and
1,000 test images per class (70,000 total), each a 28x28 grayscale image
with pixel values in [0, 255].

The script provides: data loading, device selection, sample-image display,
a TensorBoard logging wrapper, an averaging meter, a simple CNN, and the
train/eval loop with optional best-checkpoint saving.
"""
import os
import random
import datetime
import argparse

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import matplotlib.pyplot as plt


def setup_seed(seed):
    """Seed every RNG in use so experiments are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Uncomment for fully deterministic cuDNN behavior (slower):
    # torch.backends.cudnn.benchmark = False
    # torch.backends.cudnn.enabled = False
    # torch.backends.cudnn.deterministic = True


def _base_options():
    """Parse command-line options for data, training, and logging.

    Returns:
        argparse.Namespace with batch_size, num_works, epochs, lr, model,
        log_dir, best_dice, and weight_dir.
    """
    parser = argparse.ArgumentParser(description="Train setting for FashionMNIST")
    # about dataset
    parser.add_argument("--batch_size", default=8, type=int, help="the batch size of dataset")
    parser.add_argument("--num_works", default=4, type=int, help="the num_works used")
    # train
    parser.add_argument("--epochs", default=100, type=int, help="train iterations")
    parser.add_argument("--lr", default=0.001, type=float, help="learning rate")
    parser.add_argument("--model", default="SimpleNet", choices=["SimpleNet"], help="the model choosed")
    # log dir
    parser.add_argument("--log_dir", default="./logger/", help="the path of log file")
    # checkpointing: initial "best" sentinel and output directory
    parser.add_argument("--best_dice", default=-100, type=int, help="for save weight")
    parser.add_argument("--weight_dir", default="./weight/", help="the dir for save weight")
    return parser.parse_args()


def _load_data():
    """Download FashionMNIST and build the train/test DataLoaders.

    Uses the module-level ``args`` (set in ``__main__``) for batch size and
    worker count.

    Returns:
        (train_loader, test_loader) tuple of DataLoader.
    """
    trans = transforms.Compose([transforms.ToTensor()])
    train_dataset = torchvision.datasets.FashionMNIST(
        root="./data/", train=True, download=True, transform=trans)
    test_dataset = torchvision.datasets.FashionMNIST(
        root="./data/", train=False, download=True, transform=trans)
    train_loader = DataLoader(train_dataset, shuffle=True,
                              batch_size=args.batch_size, num_workers=args.num_works)
    test_loader = DataLoader(test_dataset, shuffle=True,
                             batch_size=args.batch_size, num_workers=args.num_works)
    return train_loader, test_loader


def _device():
    """Return the CUDA device when available, otherwise CPU."""
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


def _image_label(labels):
    """Map numeric FashionMNIST labels to human-readable class names."""
    text_labels = ["t-shirt", "trouser", "pullover", "dress", "coat",
                   "sandal", "shirt", "sneaker", "bag", "ankle boot"]
    return [text_labels[int(i)] for i in labels]


def _show_images(imgs, rows, columns, titles=None, scale=1.5):
    """Display a rows x columns grid of images with optional titles."""
    figsize = (rows * scale, columns * 1.5)
    fig, axes = plt.subplots(rows, columns, figsize=figsize)
    axes = axes.flatten()
    for i, (img, ax) in enumerate(zip(imgs, axes)):
        ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    plt.show()
    return axes


def _show_examples():
    """Show one batch of training images with their class names."""
    train_loader, test_loader = _load_data()
    for images, labels in train_loader:
        images = images.squeeze(1)  # drop the channel dim for imshow
        _show_images(images, 3, 3, _image_label(labels))
        break


class _logger():
    """Thin wrapper around TensorBoard's SummaryWriter."""

    def __init__(self, log_dir, log_history=True):
        # When log_history is set, nest runs under a timestamped subdirectory
        # so successive runs do not overwrite each other.
        if log_history:
            log_dir = os.path.join(
                log_dir, datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S"))
        self.summary = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # ``value`` is a dict of {series_name: scalar} (add_scalars, plural).
        self.summary.add_scalars(tag, value, step)

    def images_summary(self, tag, image_tensor, step):
        self.summary.add_images(tag, image_tensor, step)

    def figure_summary(self, tag, figure, step):
        self.summary.add_figure(tag, figure, step)

    def graph_summary(self, model):
        self.summary.add_graph(model)

    def close(self):
        self.summary.close()


class AverageMeter():
    """Track the current value and running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed over ``n`` samples."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


class Conv3x3(nn.Module):
    """Two 3x3 conv + BN + ReLU stages; optionally halves H and W.

    When ``down_sample`` is True, the second 3x3 conv is replaced by a
    2x2 stride-2 conv so the spatial resolution is halved.
    """

    def __init__(self, in_channels, out_channels, down_sample=False):
        super(Conv3x3, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )
        if down_sample:
            # index 3 is the second Conv2d in the Sequential above
            self.conv[3] = nn.Conv2d(out_channels, out_channels, 2, 2, 0)

    def forward(self, x):
        return self.conv(x)


class SimpleNet(nn.Module):
    """Small CNN: 28x28 input -> two 2x downsamples -> 7x7 maps -> linear head."""

    def __init__(self, in_channels, out_channels):
        super(SimpleNet, self).__init__()
        self.conv1 = Conv3x3(in_channels, 32)
        self.conv2 = Conv3x3(32, 64, down_sample=True)    # 28 -> 14
        self.conv3 = Conv3x3(64, 128)
        self.conv4 = Conv3x3(128, 256, down_sample=True)  # 14 -> 7
        self.fc = nn.Linear(256 * 7 * 7, out_channels)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = torch.flatten(x, 1)
        return self.fc(x)


def train(model, train_loader, test_loader, criterion, optimizor,
          epochs, device, writer, save_weight=False):
    """Run the train/eval loop, log metrics, optionally save best checkpoints.

    Args:
        model: the network to train.
        train_loader / test_loader: DataLoaders for the two splits.
        criterion: loss function (e.g. CrossEntropyLoss).
        optimizor: optimizer over ``model.parameters()``.
        epochs: number of epochs to run.
        device: torch.device to place batches on.
        writer: a ``_logger`` instance for TensorBoard summaries.
        save_weight: when True, save a checkpoint whenever test accuracy
            improves on the best seen so far (initialized from args.best_dice).
    """
    train_loss = AverageMeter()
    test_loss = AverageMeter()
    train_precision = AverageMeter()
    test_precision = AverageMeter()
    time_tick = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
    # Best score must persist across epochs; re-reading args.best_dice every
    # epoch would save a checkpoint on every epoch above the initial sentinel.
    best_dice = args.best_dice
    for epoch in range(epochs):
        print("\nEpoch: [%d | %d] LR: %f" % (epoch + 1, args.epochs, args.lr))
        model.train()
        for input, label in tqdm(train_loader):
            input, label = input.to(device), label.to(device)
            output = model(input)
            # backward
            loss = criterion(output, label)
            optimizor.zero_grad()
            loss.backward()
            optimizor.step()
            # logger
            predict = torch.argmax(output, dim=1)
            train_pre = sum(predict == label) / len(label)
            train_loss.update(loss.item(), input.size(0))
            train_precision.update(train_pre.item(), input.size(0))
        model.eval()
        with torch.no_grad():
            for X, y in tqdm(test_loader):
                X, y = X.to(device), y.to(device)
                y_hat = model(X)
                loss_te = criterion(y_hat, y)
                predict_ = torch.argmax(y_hat, dim=1)
                test_pre = sum(predict_ == y) / len(y)
                test_loss.update(loss_te.item(), X.size(0))
                test_precision.update(test_pre.item(), X.size(0))
        if save_weight:
            weight_dir = os.path.join(args.weight_dir, args.model, time_tick)
            os.makedirs(weight_dir, exist_ok=True)
            monitor_dice = test_precision.avg
            if monitor_dice > best_dice:
                best_dice = max(monitor_dice, best_dice)
                name = os.path.join(
                    weight_dir,
                    args.model + "_" + str(epoch)
                    + "_test_loss-" + str(round(test_loss.avg, 4))
                    + "_test_dice-" + str(round(best_dice, 4)) + ".pt")
                torch.save(model.state_dict(), name)
        print("train ---Loss: {loss:.4f} | Dice: {dice:.4f}".format(
            loss=train_loss.avg, dice=train_precision.avg))
        print("test  ---Loss: {loss:.4f} | Dice: {dice:.4f}".format(
            loss=test_loss.avg, dice=test_precision.avg))
        # summary
        writer.scalar_summary("Loss/loss",
                              {"train": train_loss.avg, "test": test_loss.avg}, epoch)
        writer.scalar_summary("Loss/precision",
                              {"train": train_precision.avg, "test": test_precision.avg}, epoch)
    writer.close()


if __name__ == "__main__":
    # config
    args = _base_options()
    device = _device()
    # data
    train_loader, test_loader = _load_data()
    # logger
    writer = _logger(log_dir=os.path.join(args.log_dir, args.model))
    # model
    model = SimpleNet(in_channels=1, out_channels=10).to(device)
    optimizor = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    train(model, train_loader, test_loader, criterion, optimizor,
          args.epochs, device, writer, save_weight=True)
    # To preview sample images instead of training:
    # args = _base_options(); _show_examples()
http://www.w-s-a.com/news/981701/

相关文章:

  • 网络销售代理加盟南京seo排名扣费
  • 赤峰中国建设招标网站网站开发投标文件
  • 域名抢住网站婚庆网页设计
  • 公司网站建设的通知南宁怎么做网站
  • 搜狐快站建站教程电子商务网站后台模板
  • .gs域名做网站怎么样做网站有没有用
  • 肇庆住房和城乡建设局网站广州seo公司排名
  • j2ee网站开发买什么书网络媒体有哪些
  • 江西省住房建设部官方网站用多说的网站
  • 云课堂哪个网站做的好网站 集约化平台建设方案的通知
  • 撰写网站栏目规划怎么建自己的平台
  • 中国建设银行巴黎分行网站建设银行忘记密码网站首页
  • 网站左侧树形导航怎么做像wordpress一样的网站吗
  • 做网站用的书公司做网站 需要解决哪些问题
  • 电器网站建设策划书深圳动画制作
  • cpa网站建设wordpress支付宝微信收费吗
  • 权威网站排名桂林生活网论坛
  • 网站设计息济南网站建设济南
  • 安蓉建设总公司网站网站怎么做才能被百度收录
  • 电子商务网站业务流程分析做效果图的外包网站
  • wordpress仿站视频教程wordpress用什么php版本好
  • 郑州做网站九零后网络沧州做网站的专业公司
  • 小游戏网站建设可以自己做图片的软件
  • 湖南地税局官网站水利建设基金app仿制
  • 苏州网站设计kgwl建设网站需要用到哪些技术人员
  • 万户网络做网站如何亚马逊网站建设
  • 门户网站制作费用暴雪公司最新消息
  • 深圳专业建网站公司济南公司做网站的价格
  • 怎么运行自己做的网站网上申请平台怎么申请
  • 旅游公司网站 优帮云新闻近期大事件