一、准备数据
在Pytorch中构建图片数据管道通常有三种方法
- 第一种是使用torchvision中的datasets.ImageFolder来读取图片,然后用DataLoader来并行加载。
- 第二种是通过继承torch.utils.data.Dataset实现用户自定义读取逻辑然后用DataLoader来并行加载。
- 第三种方法是读取用户自定义数据集的通用方法,既可以读取图片数据集,也可以读取文本数据集。
这里使用第一种方法
import datetime

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
# Preprocessing pipelines: only tensor conversion here (no augmentation).
transform_train = transforms.Compose(
    [transforms.ToTensor()])
transform_valid = transforms.Compose(
    [transforms.ToTensor()])

# target_transform converts the integer class index into a float tensor of
# shape (1,), matching the (batch, 1) output expected by BCELoss.
ds_train = datasets.ImageFolder("../data/cifar2/train/",
    transform=transform_train, target_transform=lambda t: torch.tensor([t]).float())
ds_valid = datasets.ImageFolder("../data/cifar2/test/",
    transform=transform_valid, target_transform=lambda t: torch.tensor([t]).float())
print(ds_train.class_to_idx)

dl_train = DataLoader(ds_train, batch_size=50, shuffle=True, num_workers=3)
# Bug fix: the validation loader was built from ds_train in the original —
# validation would have scored the training set. It must wrap ds_valid.
# Shuffling is also unnecessary during validation.
dl_valid = DataLoader(ds_valid, batch_size=50, shuffle=False, num_workers=3)
查看部分样本
# Visualize the first 9 training samples in a 3x3 grid.
from matplotlib import pyplot as plt

plt.figure(figsize=(8, 8))
for idx in range(9):
    image, target = ds_train[idx]
    axis = plt.subplot(3, 3, idx + 1)
    # imshow expects HWC, tensors are CHW — permute before converting.
    axis.imshow(image.permute(1, 2, 0).numpy())
    axis.set_title("label = %d" % target.item())
    axis.set_xticks([])
    axis.set_yticks([])
plt.show()
pytorch 的图片默认顺序是: Batch, Channel, Height, Width (即 NCHW)
# Inspect the tensor shapes of a single batch from the training loader.
for features, labels in dl_train:
    print(features.shape, labels.shape)
    break
>>> torch.Size([50, 3, 32, 32]) torch.Size([50, 1])
二、定义模型
使用Pytorch通常有三种方式构建模型:
- 使用nn.Sequential按层顺序构建模型
- 继承nn.Module基类构建自定义模型
- 继承nn.Module基类构建模型并辅助应用模型容器(nn.Sequential, nn.ModuleList, nn.ModuleDict)
# Sanity check: AdaptiveMaxPool2d((1, 1)) collapses any spatial size to 1x1,
# so the classifier head below does not depend on the input image size.
pool = nn.AdaptiveMaxPool2d((1, 1))
t = torch.randn(10, 8, 32, 32)
# Fix: a bare expression is silently discarded in a script (it only
# displays in a notebook), so print the resulting shape explicitly.
print(pool(t).shape)
class Net(nn.Module):
    """Small CNN for binary classification of 32x32 RGB images.

    Output is a (batch, 1) tensor of sigmoid probabilities, matching
    BCELoss and the (1,)-shaped float targets produced by the dataset's
    target_transform.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Bug fix: keyword was misspelled "out_channles" in the original,
        # which raises TypeError at construction time.
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5)
        self.dropout = nn.Dropout2d(p=0.1)
        # Collapses the feature map to 1x1, so the head sees 64 features
        # regardless of input spatial size.
        self.adaptive_pool = nn.AdaptiveMaxPool2d((1, 1))
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(64, 32)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(32, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.conv2(x)
        x = self.pool(x)
        x = self.dropout(x)
        x = self.adaptive_pool(x)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.relu(x)
        # Bug fix: the original forward skipped linear2 entirely and fed the
        # 32-dim ReLU output straight into the sigmoid; the layer summary
        # (Linear-10 -> Sigmoid-11) shows linear2 belongs in the graph.
        x = self.linear2(x)
        y = self.sigmoid(x)
        return y
# Instantiate the network and display its module hierarchy.
net = Net()
print(net)
使用torchkeras打印网络模型
# Keras-style parameter summary (third-party torchkeras package).
import torchkeras

# Bug fix: the module name was misspelled "torchkears" in the original,
# which raises NameError.
torchkeras.summary(net, input_shape=(3, 32, 32))
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 30, 30] 896
MaxPool2d-2 [-1, 32, 15, 15] 0
Conv2d-3 [-1, 64, 11, 11] 51,264
MaxPool2d-4 [-1, 64, 5, 5] 0
Dropout2d-5 [-1, 64, 5, 5] 0
AdaptiveMaxPool2d-6 [-1, 64, 1, 1] 0
Flatten-7 [-1, 64] 0
Linear-8 [-1, 32] 2,080
ReLU-9 [-1, 32] 0
Linear-10 [-1, 1] 33
Sigmoid-11 [-1, 1] 0
================================================================
Total params: 54,273
Trainable params: 54,273
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.011719
Forward/backward pass size (MB): 0.359634
Params size (MB): 0.207035
Estimated Total Size (MB): 0.578388
----------------------------------------------------------------
三、训练模型
有三种典型训练循环代码风格:
- 脚本形式训练循环
- 函数形式训练循环
- 类形式训练循环
这里使用函数形式训练循环
import pandas as pd
from sklearn.metrics import roc_auc_score
# Attach the training configuration directly onto the model object so the
# train/valid step functions can reach optimizer, loss and metric via it.
model = net
model.optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
model.loss_func = torch.nn.BCELoss()
# Bug fix: the next two attributes were assigned to the undefined name
# "modle" in the original, which raises NameError at runtime.
model.metric_func = lambda y_pred, y_true: roc_auc_score(
    y_true.data.numpy(), y_pred.data.numpy())
model.metric_name = "auc"
def train_step(modle, features, labels):
# 训练模式, dropout层发生作用,使用model.train()
model.train()
# 梯度清零
model.optimizer.zero_grad()
# 正向传播求损失
predictions = model(features)
loss = model.loss_func(predictions, labels)
metric = model.metric_func(predictions, labels)
# 反向传播求梯度
loss.backward()
model.optimizer.step()
return loss.item(), metric.item()
def valid_step(model, features, labels):
    """Score one validation batch; returns (loss, metric) as floats."""
    # Evaluation mode: dropout layers are inactive.
    model.eval()
    # Gradients are not needed for validation.
    with torch.no_grad():
        preds = model(features)
        batch_loss = model.loss_func(preds, labels)
        batch_metric = model.metric_func(preds, labels)
    return batch_loss.item(), batch_metric.item()
# Smoke-test train_step on a single batch from the training loader.
batch = next(iter(dl_train))
train_step(model, batch[0], batch[1])
def train_model(model, epochs, dl_train, dl_valid, log_step_freq):
    """Train `model` for `epochs` epochs with per-epoch validation.

    Progress is printed every `log_step_freq` training steps; epoch-level
    averages are appended to the `dfhistory` DataFrame (one row per epoch).
    Requires `model.metric_name` and the train_step/valid_step helpers
    defined above.
    """
    metric_name = model.metric_name
    dfhistory = pd.DataFrame(
        columns=["epoch", "loss", metric_name, "val_loss", "val_" + metric_name])
    print("Start Training...")
    nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print("==========="*8 + "%s"%nowtime)

    for epoch in range(1, epochs + 1):
        # 1. Training loop.
        # Bug fix: this accumulator was initialized as "loss_num" in the
        # original, so "loss_sum += loss" below raised NameError on the
        # first step.
        loss_sum = 0.0
        metric_sum = 0.0
        step = 1
        for step, (features, labels) in enumerate(dl_train, 1):
            loss, metric = train_step(model, features, labels)
            loss_sum += loss
            metric_sum += metric
            if step % log_step_freq == 0:
                # Running averages over the steps seen so far this epoch.
                print(("[step = %d] loss: %.3f, "+metric_name+":%.3f")%
                      (step, loss_sum/step, metric_sum/step))

        # 2. Validation loop (no weight updates).
        val_loss_sum = 0.0
        val_metric_sum = 0.0
        val_step = 1
        for val_step, (features, labels) in enumerate(dl_valid, 1):
            val_loss, val_metric = valid_step(model, features, labels)
            val_loss_sum += val_loss
            val_metric_sum += val_metric

        # 3. Record epoch-level averages in the history DataFrame.
        info = (epoch, loss_sum/step, metric_sum/step,
                val_loss_sum/val_step, val_metric_sum/val_step)
        dfhistory.loc[epoch-1] = info

        # Print the epoch-level log line with a timestamp.
        print(("\nEPOCH = %d, loss = %.3f,"+ metric_name + \
              " = %.3f, val_loss = %.3f, "+"val_"+ metric_name+" = %.3f")
              %info)
        nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print("\n"+"=========="*8 + "%s"%nowtime)