基本语句
- 模型放到GPU上运行
model.cuda() , 默认只使用一个GPU - 张量放到GPU上
mytensor = my_tensor.cuda() - 多个GPU调用:
model = nn.DataParallel(model)
# Multi-GPU example: wrap the model in nn.DataParallel when more than one
# GPU is visible, move model and data to the GPU, and run batches through it.
# NOTE(review): Model, input_size, output_size and rand_loader are assumed to
# be defined earlier in the file (standard PyTorch DataParallel tutorial setup).
model = Model(input_size, output_size)

if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    model = nn.DataParallel(model)

# torch.autograd.Variable is deprecated since PyTorch 0.4: tensors carry
# autograd state directly. Pick the device once instead of re-checking
# torch.cuda.is_available() on every loop iteration.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

for data in rand_loader:
    input_var = data.to(device)
    output = model(input_var)
    print("Outside: input size", input_var.size(), "output_size", output.size())