一、Sequential的使用

![image](image.png)


Tips:实际Flatten和Fully connected,Fully connected和output层之间是全连接层

  1. 导包

    1. import torch
    2. from torch import nn
    3. from torch.nn import Conv2d, MaxPool2d,Flatten, Linear
    4. from tensorboardX import SummaryWriter
  2. 定义网络 ```python class Classify(nn.Module): def init(self):

    1. super().__init__()
    2. """
    3. # 使用普通方式
    4. self.conv1 = Conv2d(3, 32, 5, padding=2, stride=1)
    5. self.maxpool1 = MaxPool2d(2)
    6. self.conv2 = Conv2d(32,32,5, padding=2, stride=1)
    7. self.maxpool2 = MaxPool2d(2)
    8. self.conv3 = Conv2d(32, 64, 5, padding=2 ,stride=1)
    9. self.maxpool3 = MaxPool2d(2)
    10. self.flatten = Flatten()
    11. self.linear1 = Linear(1024,64)
    12. self.linear2 = Linear(64,10)
    13. """
    14. self.model1 = nn.Sequential(
    15. Conv2d(3, 32, 5, padding=2, stride=1),
    16. MaxPool2d(2),
    17. Conv2d(32,32,5, padding=2, stride=1),
    18. MaxPool2d(2),
    19. Conv2d(32, 64, 5, padding=2 ,stride=1),
    20. MaxPool2d(2),
    21. Flatten(),
    22. Linear(1024,64),
    23. Linear(64,10)
    24. )
  1. def forward(self,x):
  2. """
  3. x = self.conv1(x)
  4. x = self.maxpool1(x)
  5. x = self.conv2(x)
  6. x = self.maxpool2(x)
  7. x = self.conv3(x)
  8. x = self.maxpool3(x)
  9. x = self.flatten(x)
  10. x = self.flatten(x)
  11. x = self.linear1(x)
  12. x = self.linear2(x)
  13. """
  14. x = self.model1(x)
  15. return x
  1. 3. 运行
  2. ```python
  3. writer = SummaryWriter("logs")
  4. model = Classify()
  5. input = torch.ones((64,3,32,32))
  6. output = model(x)
  7. writer.add_graph(model, input)
  8. writer.close()

![image](image.png)

二、优化器和损失函数

  1. # 定义数据集和数据加载器
  2. dataset = torchvision.datasets.CIFAR10("./dataset", train = False, transform = torchvision.transforms.ToTensor(), download = False)
  3. dataloader = DataLoader(dataset,batch_size = 32)
  1. # 构造模型
  2. model = Classify()
  3. #----------------------------------#
  4. # 构造优化器(随机梯度下降)
  5. # model.parameters() 为网络的参数
  6. # lr为学习率
  7. #----------------------------------#
  8. optim = torch.optim.SGD(model.parameters(), lr=0.01)
  9. # 计算交叉熵损失
  10. loss = nn.CrossEntropyLoss()
  11. writer = SummaryWriter("logs")
  12. # 20个epoch
  13. for epoch in range(20):
  14. # runing_loss为本次epoch中所有损失的和
  15. runing_loss = 0.0
  16. for idx,data in enumerate(dataloader):
  17. imgs ,targets = data
  18. outputs = model(imgs)
  19. # 计算本次batch的损失
  20. result_loss = loss(outputs, targets)
  21. # 求和
  22. runing_loss = runing_loss + result_loss
  23. #-------------------#
  24. # zero_grad():清空所管理参数的梯度。
  25. # 由于 PyTorch 的特性是张量的梯度不自动清零,因此每次反向传播之后都需要清空梯度
  26. # optim为定义的随机梯度下降优化器
  27. #-------------------#
  28. optim.zero_grad()
  29. # 反向传播(为batch,不是epoch)
  30. result_loss.backward()
  31. # 执行一步梯度更新
  32. optim.step()
  33. print(runing_loss)
  34. writer.close()