Tips:附件

day10.ipynb

  • 计算准确率:accuracy = (outputs.argmax(1) == targers).sum()/test_data_size,outputs为输出

    1. 导包

    1. import torch
    2. import torchvision
    3. from torch.utils.data import DataLoader
    4. from torch import nn
    5. from torch.nn import Conv2d, MaxPool2d, Flatten, Linear
    6. from tensorboardX import SummaryWriter

    2. 数据集

    1. train_data = torchvision.datasets.CIFAR10("./dataset", train = True,
    2. download = False,
    3. transform = torchvision.transforms.ToTensor())
    4. test_data = torchvision.datasets.CIFAR10("./dataset", train = False,
    5. download = False,
    6. transform = torchvision.transforms.ToTensor())

    3. 数据加载

    1. train_data_size = len(train_data)
    2. # 测试集大小
    3. test_data_size = len(test_data)
    4. train_loader = DataLoader(train_data, batch_size=64)
    5. test_loader = DataLoader(test_data, batch_size = 64)

    4. 模型定义

    1. class Classify(nn.Module):
    2. def __init__(self):
    3. super().__init__()
    4. self.model1 = nn.Sequential(
    5. Conv2d(3, 32, 5, padding=2, stride=1),
    6. MaxPool2d(2),
    7. Conv2d(32,32,5, padding=2, stride=1),
    8. MaxPool2d(2),
    9. Conv2d(32, 64, 5, padding=2 ,stride=1),
    10. MaxPool2d(2),
    11. Flatten(),
    12. Linear(1024,64),
    13. Linear(64,10)
    14. )
    15. def forward(self,x):
    16. return self.model1(x)

    5. 优化器和损失计算

    1. model = Classify()
    2. # 计算损失
    3. loss_fn = nn.CrossEntropyLoss()
    4. # 学习率
    5. learning_rate = 1e-2
    6. # 优化器
    7. optim = torch.optim.SGD(model.parameters(), lr = learning_rate)

    6. 参数设置

    1. # 设置参数
    2. # 训练次数
    3. total_train_step = 0
    4. # 测试次数
    5. total_test_step = 0
    6. # 训练轮数
    7. epoch = 4

    7. 训练and可视化

```python
writer = SummaryWriter("logs")
for i in range(epoch):
    print("————————第{}轮训练".format(i+1))

model.train()只对特定层起作用,例如dropout层,batchnorm层

model.train()
for idx, data in enumerate(train_loader):

    1. imgs, targers = data
    2. outputs = model(imgs)
    3. # 计算损失
    4. loss = loss_fn(outputs, targers)
    5. # 梯度清零
    6. optim.zero_grad()
    7. # 反向传播
    8. loss.backward()
    9. # 更新梯度
    10. optim.step()
    11. total_train_step = total_train_step + 1
    12. if idx % 100 == 0:
    13. writer.add_scalar("train_loss_4",loss.item(),idx )
    14. print("训练次数为:{},loss为{}".format(idx + 1,loss))

    测试阶段

model.eval()只对特定层起作用,例如dropout层,batchnorm层

    model.eval()

    测试集损失

    total_test_loss = 0

    正确率总和

total_accuracy = 0
with torch.no_grad():

    1. for idx, data in enumerate(test_loader):
    2. imgs, targers = data
    3. outputs = model(imgs)
    4. loss = loss_fn(outputs, targers)
    5. total_test_loss = total_test_loss + loss
    6. # 计算正确率
    7. accuracy = (outputs.argmax(1) == targers).sum()
    8. # 准确率求和
    9. total_accuracy = total_accuracy + accuracy

print("整体测试集上的loss为:{}".format(total_test_loss))
print("整体测试集上的正确率为:{}".format(total_accuracy/test_data_size))
writer.add_scalar("test_loss_4", total_test_loss.item(), total_test_step)
writer.add_scalar("test_accurary_4", total_accuracy/test_data_size, total_test_step)
total_test_step = total_test_step + 1

torch.save(model, "model_{}.pth".format(i))
print("模型已保存!")
writer.close()

```