基本语句

  • 模型放到GPU上运行 model.cuda() , 默认只使用一个GPU
  • 张量放到GPU上 mytensor = my_tensor.cuda()
  • 多个GPU调用: model = nn.DataParallel(model)
  1. model = Model(input_size, output_size)
  2. if torch.cuda.device_count() > 1:
  3. print("Let's use", torch.cuda.device_count(), "GPUs!")
  4. # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
  5. model = nn.DataParallel(model)
  6. if torch.cuda.is_available():
  7. model.cuda()
  8. for data in rand_loader:
  9. if torch.cuda.is_available():
  10. input_var = Variable(data.cuda())
  11. else:
  12. input_var = Variable(data)
  13. output = model(input_var)
  14. print("Outside: input size", input_var.size(),
  15. "output_size", output.size())