Tensors

    from __future__ import print_function
    import torch

    # Construct a 5x3 matrix, uninitialized
    x = torch.empty(5, 3)
    print(x)

Output:

    tensor([[0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])

    # Construct a randomly initialized matrix
    x = torch.rand(5, 3)
    print(x)

Output:

    tensor([[0.9009, 0.7387, 0.1229],
            [0.1673, 0.8125, 0.8828],
            [0.3425, 0.0978, 0.9591],
            [0.4853, 0.2610, 0.1937],
            [0.1460, 0.8068, 0.9388]])

    # Construct a matrix filled with zeros, of dtype long
    x = torch.zeros(5, 3, dtype=torch.long)
    print(x)

Output:

    tensor([[0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0]])

    # Construct a tensor directly from data
    x = torch.tensor([5.5, 3])
    print(x)

Output:

    tensor([5.5000, 3.0000])

    # Create a tensor based on an existing tensor
    x = x.new_ones(5, 3, dtype=torch.double)    # new_* methods take in sizes
    print(x)
    x = torch.randn_like(x, dtype=torch.float)  # override dtype!
    print(x)                                    # result has the same size

Output:

    tensor([[1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.]], dtype=torch.float64)
    tensor([[-0.9599, -0.7486, -0.0417],
            [ 2.2920,  1.0358,  0.9876],
            [ 0.5569, -1.9474, -2.2035],
            [ 1.2106, -0.5178,  1.4441],
            [ 0.3283, -1.8450, -1.4182]])

    # Addition
    y = torch.rand(5, 3)
    print(x + y)

    # Addition: providing an output tensor as argument
    result = torch.empty(5, 3)
    torch.add(x, y, out=result)
    print(result)

    # Addition: in-place (adds x to y)
    y.add_(x)
    print(y)
    # Resizing: if you want to resize/reshape a tensor, you can use torch.view
    x = torch.randn(4, 4)
    y = x.view(16)
    z = x.view(-1, 8)  # the size -1 is inferred from the other dimensions
    print(x.size(), y.size(), z.size())

Output:

    torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])

    # If you have a one-element tensor, use .item() to get the value as a Python number
    x = torch.randn(1)
    print(x)
    print(x.item())

Output:

    tensor([-1.3421])
    -1.3421080112457275

    # Converting a Torch Tensor to a NumPy array
    a = torch.ones(5)
    print(a)
    b = a.numpy()
    print(b)

    # Converting a NumPy array to a Torch Tensor
    import numpy as np
    a = np.ones(5)
    b = torch.from_numpy(a)
    np.add(a, 1, out=a)  # the array and the tensor share memory, so b changes as well
    print(a)
    print(b)
    # Tensors can be moved onto any device using the .to method.
    # Let us run this cell only if CUDA is available.
    # We will use ``torch.device`` objects to move tensors in and out of the GPU.
    if torch.cuda.is_available():
        device = torch.device("cuda")           # a CUDA device object
        y = torch.ones_like(x, device=device)   # directly create a tensor on the GPU
        x = x.to(device)                        # or just use strings: ``.to("cuda")``
        z = x + y
        print(z)
        print(z.to("cpu", torch.double))        # ``.to`` can also change the dtype at the same time!

Autograd

torch.Tensor is the central class of the package. If you set its attribute .requires_grad to True, it starts to track all operations on the tensor. When the computation is finished, you can call .backward() to have all the gradients computed automatically. The gradient for the tensor is accumulated into its .grad attribute.

To stop a tensor from tracking history, you can call .detach(), which detaches it from the computation history and prevents future computation on it from being tracked.
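A minimal sketch of what .detach() does (the variable names here are only illustrative):

    import torch

    x = torch.ones(2, 2, requires_grad=True)
    y = x * 2
    print(y.requires_grad)   # True: y is part of the tracked computation graph
    z = y.detach()
    print(z.requires_grad)   # False: z shares data with y but is cut off from the graph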

To prevent tracking history (and using memory), you can also wrap a code block in with torch.no_grad():. This is particularly helpful when evaluating a model, because the model may have trainable parameters with requires_grad=True, but we do not need the gradients during evaluation.
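Likewise, a minimal sketch of torch.no_grad() (again with illustrative variable names):

    import torch

    x = torch.ones(2, 2, requires_grad=True)
    print((x ** 2).requires_grad)      # True: the result is tracked
    with torch.no_grad():
        print((x ** 2).requires_grad)  # False: nothing inside the block is tracked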

    import torch

    # Create a tensor and set requires_grad=True to track computation with it
    x = torch.ones(2, 2, requires_grad=True)
    y = x + 2
    z = y * y * 3
    out = z.mean()
    out.backward()  # backpropagation
    print(x.grad)   # tensor([[4.5000, 4.5000],
                    #         [4.5000, 4.5000]])
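The value 4.5 follows directly from the chain rule: out = (1/4) Σ z_i with z_i = 3(x_i + 2)^2, so ∂out/∂x_i = (3/2)(x_i + 2), which evaluates to (3/2) * 3 = 4.5 at x_i = 1.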

Neural Networks

Neural networks can be constructed using the torch.nn package.
A typical training procedure for a neural network involves the following steps:

  1. Define a neural network with trainable parameters
  2. Iterate over a dataset of inputs
  3. Process the input through the network
  4. Compute the loss
  5. Propagate the gradients back into the network's parameters
  6. Update the network's weights, typically with a simple rule: weight = weight - learning_rate * gradient (a sketch of this update appears at the end of this section)
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Net(nn.Module):

        def __init__(self):
            super(Net, self).__init__()
            # 1 input image channel, 6 output channels, 5x5 square convolution
            # kernel
            self.conv1 = nn.Conv2d(1, 6, 5)
            self.conv2 = nn.Conv2d(6, 16, 5)
            # an affine operation: y = Wx + b
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)

        def forward(self, x):
            # Max pooling over a (2, 2) window
            x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
            # If the size is a square you can only specify a single number
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)
            x = x.view(-1, self.num_flat_features(x))
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x

        def num_flat_features(self, x):
            size = x.size()[1:]  # all dimensions except the batch dimension
            num_features = 1
            for s in size:
                num_features *= s
            return num_features

    net = Net()
    print(net)
Output:

    Net(
      (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
      (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
      (fc1): Linear(in_features=400, out_features=120, bias=True)
      (fc2): Linear(in_features=120, out_features=84, bias=True)
      (fc3): Linear(in_features=84, out_features=10, bias=True)
    )

The learnable parameters of a model are returned by net.parameters():

    params = list(net.parameters())
    print(len(params))
    print(params[0].size())  # conv1's .weight

Output:

    10
    torch.Size([6, 1, 5, 5])

Let's try a random 32x32 input. Note: the expected input size of this network is 32x32. To use this network on the MNIST dataset, you need to resize the images in the dataset to 32x32.

    input = torch.randn(1, 1, 32, 32)
    out = net(input)
    print(out)

Output:

    tensor([[-0.0233,  0.0159, -0.0249,  0.1413,  0.0663,  0.0297, -0.0940, -0.0135,
              0.1003, -0.0559]], grad_fn=<AddmmBackward>)

Zero the gradient buffers of all parameters, and backpropagate with random gradients:

    net.zero_grad()
    out.backward(torch.randn(1, 10))

A simple loss function is nn.MSELoss, which computes the mean-squared error between the output and the target:

    output = net(input)
    target = torch.randn(10)     # a dummy target, for example
    target = target.view(1, -1)  # make it the same shape as output
    criterion = nn.MSELoss()
    loss = criterion(output, target)
    print(loss)
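The training list above ends with backpropagation and the update rule weight = weight - learning_rate * gradient. Here is a minimal sketch of those last two steps, continuing from the loss computed above (the learning_rate value is only illustrative, and in practice torch.optim.SGD performs this update for you):

    net.zero_grad()    # clear existing gradient buffers, otherwise gradients accumulate
    loss.backward()    # backpropagate the loss into each parameter's .grad

    learning_rate = 0.01
    with torch.no_grad():  # the update itself should not be tracked by autograd
        for param in net.parameters():
            param -= learning_rate * param.grad  # weight = weight - learning_rate * gradient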

http://pytorch123.com/SecondSection/neural_networks/