数据准备

  1. def ready_data():
  2. """准备数据"""
  3. x_value = [i for i in range(11)]
  4. x_train = np.array(x_value, dtype=np.float32)
  5. x_train = x_train.reshape(-1, 1)
  6. y_value = [(2 * i + 2) for i in x_value]
  7. y_train = np.array(y_value, dtype=np.float32)
  8. y_train = y_train.reshape(-1, 1)
  9. return x_train, y_train

建立模型

  1. class LinearRegressionModel(nn.Module):
  2. """建模"""
  3. def __init__(self, input_dim, output_dim):
  4. super().__init__()
  5. self.linear = nn.Linear(input_dim, output_dim)
  6. def forward(self, x):
  7. out = self.linear(x)
  8. return out

优化策略

  1. input_dim = 1
  2. output_dim = 1
  3. model = LinearRegressionModel(input_dim=input_dim, output_dim=output_dim)
  4. print(model)
  5. epochs = 1000
  6. learning_rate = 0.01
  7. optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
  8. criterion = nn.MSELoss()
  9. x_train, y_train = ready_data()
  10. for epoch in range(epochs):
  11. inputs = torch.from_numpy(x_train)
  12. labels = torch.from_numpy(y_train)
  13. # 将梯度清零
  14. optimizer.zero_grad()
  15. # 前向传播
  16. outputs = model(inputs)
  17. # 计算损失
  18. loss = criterion(outputs, labels)
  19. # 反向传播
  20. loss.backward()
  21. # 更新参数
  22. optimizer.step()
  23. if epoch % 50 == 0:
  24. print('epoch {}, loss {}'.format(epoch, loss.item()))

梯度清零

  1. optimizer.zero_grad()  # reset gradients; PyTorch accumulates them across backward() calls by default

前向传播

  1. outputs = model(inputs)  # forward pass: compute predictions for the whole batch

计算损失

  1. loss = criterion(outputs, labels)  # MSE between predictions and targets (criterion is nn.MSELoss)

反向传播

  1. loss.backward()  # backpropagation: compute gradients of the loss w.r.t. every model parameter

更新参数

  1. optimizer.step()  # apply one SGD update using the gradients just computed by backward()

模型保存

  1. torch.save(model.state_dict(), 'model.pkl')  # persist only the learned parameters, not the model object itself

模型读取

  1. model.load_state_dict(torch.load('model.pkl'))  # restore parameters into an already-constructed model; NOTE(review): torch.load unpickles — only load files from trusted sources