Tensors
from __future__ import print_function
import torch

x = torch.empty(5, 3)
print(x)

Output:

tensor([[0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]])

x = torch.rand(5, 3)
print(x)

Output:

tensor([[0.9009, 0.7387, 0.1229],
        [0.1673, 0.8125, 0.8828],
        [0.3425, 0.0978, 0.9591],
        [0.4853, 0.2610, 0.1937],
        [0.1460, 0.8068, 0.9388]])

x = torch.zeros(5, 3, dtype=torch.long)
print(x)

Output:

tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])

x = torch.tensor([5.5, 3])
print(x)

Output:

tensor([5.5000, 3.0000])

x = x.new_ones(5, 3, dtype=torch.double)    # new_* methods take in sizes
print(x)
x = torch.randn_like(x, dtype=torch.float)  # override dtype!
print(x)                                    # result has the same size

Output:

tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
tensor([[-0.9599, -0.7486, -0.0417],
        [ 2.2920,  1.0358,  0.9876],
        [ 0.5569, -1.9474, -2.2035],
        [ 1.2106, -0.5178,  1.4441],
        [ 0.3283, -1.8450, -1.4182]])

# Addition
y = torch.rand(5, 3)
print(x + y)

# Addition: providing an output tensor as argument
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)

# Addition: in-place
# adds x to y
y.add_(x)
print(y)
# Resizing: If you want to resize/reshape a tensor, you can use torch.view:
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # the size -1 is inferred from the other dimensions
print(x.size(), y.size(), z.size())

Output:

torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])

# If you have a one-element tensor, use .item() to get the value as a Python number
x = torch.randn(1)
print(x)
print(x.item())

Output:

tensor([-1.3421])
-1.3421080112457275

# Converting a Torch Tensor to a NumPy Array
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)

# Converting a NumPy Array to a Torch Tensor
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
# Tensors can be moved onto any device using the .to method.
# Let us run this cell only if CUDA is available.
# We will use ``torch.device`` objects to move tensors in and out of the GPU.
if torch.cuda.is_available():
    device = torch.device("cuda")           # a CUDA device object
    y = torch.ones_like(x, device=device)   # directly create a tensor on the GPU
    x = x.to(device)                        # or just use strings: .to("cuda")
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))        # .to can also change dtype at the same time!
Autograd: Automatic Differentiation
torch.Tensor is the central class of the package. If you set its attribute .requires_grad to True, it starts to track all operations on the tensor. When you finish your computation you can call .backward() to have all the gradients computed automatically. The gradient for this tensor is accumulated into its .grad attribute.
To stop a tensor from tracking history, you can call .detach(), which detaches it from the computation history and prevents future computation on it from being tracked.
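For instance, a minimal sketch (the tensors here are illustrative, not from the original text):

import torch

x = torch.ones(3, requires_grad=True)
y = (x * 2).detach()     # y shares data with x * 2 but is detached from the graph
print(y.requires_grad)   # False -- further operations on y are not tracked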
To stop tracking history (and using memory), you can also wrap a code block in with torch.no_grad():. This is particularly helpful when evaluating a model: during training the model has trainable parameters with requires_grad=True, which is what you want for learning, but during evaluation we do not need the gradients.
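A minimal sketch of this pattern (the tensor is illustrative):

import torch

x = torch.ones(3, requires_grad=True)
print((x * 2).requires_grad)      # True -- tracked as usual

with torch.no_grad():
    print((x * 2).requires_grad)  # False -- no history is recorded inside the block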
import torch

x = torch.ones(2, 2, requires_grad=True)  # create a tensor and set requires_grad=True to track computation with it
y = x + 2
z = y * y * 3
out = z.mean()
out.backward()   # backpropagation
print(x.grad)    # tensor([[4.5000, 4.5000],
                 #         [4.5000, 4.5000]])
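To see where 4.5 comes from: out = (1/4) · Σᵢ zᵢ with zᵢ = 3(xᵢ + 2)², so ∂out/∂xᵢ = (3/2)(xᵢ + 2), which equals 4.5 at xᵢ = 1.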
Neural Networks
Neural networks are built with the torch.nn package.
A typical training procedure for a neural network consists of the following steps:
- Define a neural network that has some trainable parameters
- Iterate over the inputs
- Process each input through the network
- Compute the loss
- Backpropagate the gradients to the network's parameters
- Update the network's weights, typically with a simple update rule: weight = weight - learning_rate * gradient (a hands-on sketch of this update appears after the loss example below)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


net = Net()
print(net)
Net(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=400, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)
The trainable parameters of a model are returned by net.parameters():
params = list(net.parameters())
print(len(params))
print(params[0].size())  # conv1's .weight
Output:
10
torch.Size([6, 1, 5, 5])
Let's try a random 32x32 input. Note: the expected input size of this network is 32x32. To use this network on the MNIST dataset, you need to resize the images in the dataset to 32x32.
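One way to do that resizing is with a torchvision transform; the snippet below is only a sketch and assumes torchvision is installed (the dataset path and transform pipeline are illustrative, not from the original text):

# Hypothetical sketch: load MNIST with images resized to 32x32 so they fit this network.
import torchvision
import torchvision.transforms as transforms

transform = transforms.Compose([
    transforms.Resize((32, 32)),  # MNIST images are 28x28 by default
    transforms.ToTensor(),
])
mnist = torchvision.datasets.MNIST(root="./data", train=True,
                                   download=True, transform=transform)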
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
tensor([[-0.0233,  0.0159, -0.0249,  0.1413,  0.0663,  0.0297, -0.0940, -0.0135,
          0.1003, -0.0559]], grad_fn=<AddmmBackward>)
Zero the gradient buffers of all parameters, then backpropagate with random gradients:
net.zero_grad()
out.backward(torch.randn(1, 10))
A simple loss function is nn.MSELoss, which computes the mean-squared error between the output and the target:
output = net(input)
target = torch.randn(10)     # a dummy target, for example
target = target.view(1, -1)  # make it the same shape as the output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
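To finish the training step from the list above, one can backpropagate this loss and apply the simple rule weight = weight - learning_rate * gradient by hand. The snippet below is a minimal sketch; the learning-rate value is an illustrative assumption:

net.zero_grad()       # clear existing gradient buffers before backpropagating
loss.backward()       # compute gradients of the loss w.r.t. the network's parameters

learning_rate = 0.01  # illustrative value, not from the original text
for p in net.parameters():
    p.data.sub_(p.grad.data * learning_rate)  # weight = weight - learning_rate * gradient

In practice you would usually let an optimizer from torch.optim (for example optim.SGD) perform this update instead of writing it by hand.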
