import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l

# Generate a synthetic linear-regression dataset: y = Xw + b + noise,
# with the ground-truth parameters below, 1000 samples.
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):  #@save
    """Construct a PyTorch data iterator over the given tensors.

    data_arrays: tuple of tensors sharing their first dimension,
        e.g. ``(features, labels)``.
    batch_size: number of samples per minibatch.
    is_train: when True, reshuffle the samples every epoch (training
        mode); when False, keep the original order (evaluation).
    """
    # shuffle=is_train: shuffling only matters during training.
    return data.DataLoader(data.TensorDataset(*data_arrays),
                           batch_size, shuffle=is_train)


batch_size = 10
# Build a minibatch iterator over the synthetic dataset.
data_iter = load_array((features, labels), batch_size)
# Peek at one minibatch from the iterator (returns [X_batch, y_batch]).
next(iter(data_iter))
[tensor([[ 0.7836, 1.0064], [-0.6185, -1.3923], [-0.6124, 0.4895], [ 0.4377, 0.0993], [ 0.4083, 0.9128], [-1.2252, 0.1269], [-0.4364, -0.5128], [-0.4539, 0.6795], [-1.4068, -0.0619], [-0.9712, -0.1202]]), tensor([[2.3406], [7.6942], [1.3251], [4.7355], [1.9154], [1.3303], [5.0822], [0.9745], [1.5959], [2.6766]])]
# `nn` is an abbreviation for "neural networks".
from torch import nn

# The model: a single fully connected (linear) layer, 2 inputs -> 1 output.
net = nn.Sequential(nn.Linear(2, 1))
# Initialize the layer's parameters in place:
# weights drawn from N(0, 0.01), bias set to zero.
linear = net[0]
linear.weight.data.normal_(0, 0.01)
linear.bias.data.fill_(0)
tensor([0.])
# Squared-error loss, averaged over all elements ('mean' is the default
# reduction for nn.MSELoss; spelled out here for clarity).
loss = nn.MSELoss(reduction='mean')
# Minibatch stochastic gradient descent over all model parameters,
# with learning rate 0.03.
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
num_epochs = 3
for epoch in range(num_epochs):
    # One full pass over the training data.
    for X, y in data_iter:
        batch_loss = loss(net(X), y)
        trainer.zero_grad()     # clear gradients from the previous step
        batch_loss.backward()   # backpropagate
        trainer.step()          # update parameters
    # After each epoch, report the loss over the entire dataset.
    epoch_loss = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {epoch_loss:f}')
epoch 1, loss 0.000255epoch 2, loss 0.000098epoch 3, loss 0.000097