# Data preparation (数据准备)
def ready_data():
    """Prepare the training data for the linear-regression demo.

    Returns:
        tuple: ``(x_train, y_train)`` — float32 NumPy column vectors of
        shape (11, 1), with x = 0..10 and y = 2 * x + 2.
    """
    # Vectorised construction replaces the original per-element list
    # comprehensions + np.array conversions; result is identical.
    x_train = np.arange(11, dtype=np.float32).reshape(-1, 1)
    y_train = 2 * x_train + 2  # ground-truth line: slope 2, intercept 2
    return x_train, y_train
# Model definition (建立模型)
class LinearRegressionModel(nn.Module):
    """Single fully-connected layer: y = W x + b (plain linear regression)."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # One affine layer mapping input_dim features to output_dim outputs.
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # No activation — the model is purely linear.
        return self.linear(x)
# Optimisation setup and training loop (优化策略)
# Train the linear model with SGD to fit y = 2x + 2.
input_dim = 1
output_dim = 1

model = LinearRegressionModel(input_dim=input_dim, output_dim=output_dim)
print(model)

epochs = 1000
learning_rate = 0.01

# SGD on the single linear layer; MSE is the natural regression loss.
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()

x_train, y_train = ready_data()
# The training data never changes between epochs, so convert the NumPy
# arrays to tensors once, outside the loop (hoisted loop-invariant work).
inputs = torch.from_numpy(x_train)
labels = torch.from_numpy(y_train)

for epoch in range(epochs):
    # Zero gradients accumulated by the previous step.
    optimizer.zero_grad()
    # Forward pass.
    outputs = model(inputs)
    # Compute the loss.
    loss = criterion(outputs, labels)
    # Back-propagate.
    loss.backward()
    # Update the parameters.
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
# NOTE(review): the statements below repeat the five stages of one training
# step (they were standalone illustrative snippets in the original notebook).
# Executing them performs one extra optimisation step after the loop above.

# Zero the gradients (梯度清零)
optimizer.zero_grad()
# Forward pass (前向传播)
outputs = model(inputs)
# Compute the loss (计算损失)
loss = criterion(outputs, labels)
# Back-propagate (反向传播)
loss.backward()
# Update the parameters (更新参数)
optimizer.step()
# Save the model (模型保存): persist only the learned parameters
# (state_dict), the recommended PyTorch checkpointing form.
torch.save(model.state_dict(), 'model.pkl')
# Load the model (模型读取): restore the saved parameters into an
# already-constructed model of the same architecture.
model.load_state_dict(torch.load('model.pkl'))