# Training function (one cross-validation fold).
def train(i, train_iter, test_iter, net, loss, optimizer, device, num_epochs):
    """Train ``net`` for one CV fold and log/return its best-epoch metrics.

    Parameters
    ----------
    i : int
        Zero-based fold index; printed and logged as ``i + 1``.
    train_iter, test_iter : iterable
        Mini-batch iterators yielding ``(X, y)`` pairs.
    net : torch.nn.Module
        Model to train; moved onto ``device`` in place.
    loss : callable
        Loss function mapping ``(y_hat, y)`` to a scalar tensor.
    optimizer : torch.optim.Optimizer
        Optimizer updating ``net``'s parameters.
    device : torch.device or str
        Device for the model and every batch.
    num_epochs : int
        Number of training epochs.

    Returns
    -------
    tuple of float
        ``(train_loss, train_acc, test_acc)`` taken from the epoch that
        achieved the highest test accuracy.
    """
    net = net.to(device)
    print("training on ", device)
    start = time.time()
    test_acc_max_l = []
    train_acc_max_l = []
    train_l_min_l = []
    for epoch in range(num_epochs):
        batch_count = 0
        train_l_sum, train_acc_sum, test_acc_sum, n = 0.0, 0.0, 0.0, 0
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # .cpu().item() detaches the scalar so the graph can be freed.
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        # One epoch finished: evaluate on the held-out set.
        test_acc_sum = d2l.evaluate_accuracy(test_iter, net)
        # The three lists stay index-aligned per epoch; do NOT sort them,
        # or the best-epoch lookup below would mix metrics across epochs.
        train_l_min_l.append(train_l_sum / batch_count)
        train_acc_max_l.append(train_acc_sum / n)
        test_acc_max_l.append(test_acc_sum)
        print('fold %d epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (i + 1, epoch + 1, train_l_sum / batch_count,
                 train_acc_sum / n, test_acc_sum))
    # Index of the epoch with the best test accuracy; report the matching
    # train loss/accuracy from that same epoch.
    index_max = test_acc_max_l.index(max(test_acc_max_l))
    # Append this fold's best-epoch row; ``with`` guarantees the handle is
    # closed even if a write fails.  NOTE(review): assumes ./shallow exists.
    with open("./shallow/results.txt", "a") as f:
        if i == 0:
            # Bug fix: the original wrote the literal text "%d fold" —
            # a format placeholder that was never interpolated.
            f.write("fold" + " " + "train_loss" + " " + "train_acc" + " " + "test_acc")
        f.write('\n' + "fold" + str(i + 1) + ":" + str(train_l_min_l[index_max])
                + " ;" + str(train_acc_max_l[index_max])
                + " ;" + str(test_acc_max_l[index_max]))
    print('fold %d, train_loss_min %.4f, train acc max%.4f, test acc max %.4f, time %.1f sec'
          % (i + 1, train_l_min_l[index_max], train_acc_max_l[index_max],
             test_acc_max_l[index_max], time.time() - start))
    return train_l_min_l[index_max], train_acc_max_l[index_max], test_acc_max_l[index_max]