TensorFlow and PyTorch are the two mainstream deep learning frameworks today, each with its own strengths and weaknesses: the former is best known for its static graph computation, the latter for its dynamic graph computation. Below I train a convolutional neural network with each framework to classify the CIFAR-10 dataset. It is worth noting that once you are familiar with the general workflow of deep learning in these frameworks, most problems of this kind can be solved along essentially the same lines as the example below.
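As a minimal aside illustrating that contrast (not needed for the rest of this post): TensorFlow 2.x can trace a Python function into a static graph with tf.function, while PyTorch simply executes operations eagerly and records the dynamic graph as it goes.

import tensorflow as tf
import torch

# TensorFlow: tf.function traces the Python function into a graph once,
# then reuses the compiled graph on later calls
@tf.function
def tf_square(x):
    return x * x

print(tf_square(tf.constant(3.0)))        # tf.Tensor(9.0, shape=(), dtype=float32)

# PyTorch: operations run eagerly; the dynamic graph is recorded on the fly,
# which is also what enables autograd
x = torch.tensor(3.0, requires_grad=True)
y = x * x
print(y)                                  # tensor(9., grad_fn=<MulBackward0>)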

Training a CNN with TensorFlow

Importing the libraries

The code is as follows:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import cifar10

Data loading and preprocessing

The code is as follows:

#load the data
(train_data,train_label),(test_data,test_label) = cifar10.load_data()
#preprocess the image data (x_data: training images, y_data: test images)
x_data = train_data.astype('float32')/255
y_data = test_data.astype('float32')/255
#preprocess the labels (x_label: one-hot training labels, y_label: test labels)
import numpy as np
def one_hot(label,num_classes):
    label_one_hot = np.eye(num_classes)[label]
    return label_one_hot
num_classes = 10
train_label = train_label.astype('int32')
train_label = np.squeeze(train_label)
x_label = one_hot(train_label,num_classes)
test_label = test_label.astype('int32')
y_label = np.squeeze(test_label)
print(train_label[0:5])
print(x_label[0:5])
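As a side note, Keras ships a built-in helper that produces the same one-hot encoding, so the hand-written one_hot function above could equally be replaced with it:

from tensorflow.keras.utils import to_categorical

x_label = to_categorical(train_label, num_classes)   #same result as one_hot(train_label, 10)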

Defining the network architecture

The code is as follows:

#build the network
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Convolution2D,MaxPooling2D,Dense,Flatten,Dropout
cnn = Sequential()
#unit1
cnn.add(Convolution2D(32,kernel_size=[3,3],input_shape=(32,32,3),activation='relu',padding='same'))
cnn.add(Convolution2D(32,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(Convolution2D(32,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(MaxPooling2D(pool_size=[2,2],padding='same'))
cnn.add(Convolution2D(32,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(MaxPooling2D(pool_size=[2,2],padding='same'))
cnn.add(Dropout(0.5))
#unit2
cnn.add(Convolution2D(64,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(Convolution2D(64,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(Convolution2D(64,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(MaxPooling2D(pool_size=[2,2],padding='same'))
cnn.add(Dropout(0.5))
cnn.add(Flatten())
cnn.add(Dense(512,activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(128,activation='relu'))
cnn.add(Dropout(0.5))
#the output layer needs softmax so that categorical_crossentropy receives a probability distribution
cnn.add(Dense(10,activation='softmax'))
cnn.summary()

Compiling and training the model

The code is as follows:

#compile the model
cnn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),loss='categorical_crossentropy',metrics=['acc'])
#train the model
history_cnn = cnn.fit(x_data,x_label,epochs=20,batch_size=32,shuffle=True,verbose=1,validation_split=0.1)

Plotting the loss and accuracy curves

The code is as follows:

#plot the loss and accuracy curves
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(np.array(history_cnn.history['loss']))
plt.plot(np.array(history_cnn.history['val_loss']))
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['loss','val_loss'])
plt.show()
plt.figure(2)
plt.plot(np.array(history_cnn.history['acc']))
plt.plot(np.array(history_cnn.history['val_acc']))
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['acc','val_acc'])
plt.show()

Making predictions with the model

The code is as follows:

#save the model
cnn.save('model/cnn.h5')
#generate predictions on the test data
cnn = keras.models.load_model('model/cnn.h5')
test_out = cnn.predict(y_data)
#measure the model's accuracy on the test set
num = 0
total_num = y_data.shape[0]
for i in range(total_num):
    predict = np.argmax(test_out[i])
    if predict == y_label[i]:
        num += 1
accuracy = num/total_num
print(accuracy)
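For reference, the same test accuracy can also be obtained without the explicit Python loop, either with a vectorised NumPy expression or with Keras' own evaluate method (using the one_hot helper and the variables defined in the sections above):

#vectorised accuracy over the whole test set
accuracy = np.mean(np.argmax(test_out, axis=1) == y_label)
print(accuracy)

#or let Keras report loss and accuracy directly
loss_value, acc_value = cnn.evaluate(y_data, one_hot(y_label, num_classes), verbose=0)
print(acc_value)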

Full code listing

The complete code is as follows:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import cifar10

#load the data
(train_data,train_label),(test_data,test_label) = cifar10.load_data()

#preprocess the image data (x_data: training images, y_data: test images)
x_data = train_data.astype('float32')/255
y_data = test_data.astype('float32')/255

#preprocess the labels (x_label: one-hot training labels, y_label: test labels)
import numpy as np
def one_hot(label,num_classes):
    label_one_hot = np.eye(num_classes)[label]
    return label_one_hot

num_classes = 10
train_label = train_label.astype('int32')
train_label = np.squeeze(train_label)
x_label = one_hot(train_label,num_classes)
test_label = test_label.astype('int32')
y_label = np.squeeze(test_label)

print(train_label[0:5])
print(x_label[0:5])

#build the network
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Convolution2D,MaxPooling2D,Dense,Flatten,Dropout
cnn = Sequential()
#unit1
cnn.add(Convolution2D(32,kernel_size=[3,3],input_shape=(32,32,3),activation='relu',padding='same'))
cnn.add(Convolution2D(32,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(Convolution2D(32,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(MaxPooling2D(pool_size=[2,2],padding='same'))
cnn.add(Convolution2D(32,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(MaxPooling2D(pool_size=[2,2],padding='same'))
cnn.add(Dropout(0.5))
#unit2
cnn.add(Convolution2D(64,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(Convolution2D(64,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(Convolution2D(64,kernel_size=[3,3],activation='relu',padding='same'))
cnn.add(MaxPooling2D(pool_size=[2,2],padding='same'))
cnn.add(Dropout(0.5))
cnn.add(Flatten())
cnn.add(Dense(512,activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(128,activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(10,activation='softmax'))
cnn.summary()

#compile the model
cnn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),loss='categorical_crossentropy',metrics=['acc'])
#train the model
history_cnn = cnn.fit(x_data,x_label,epochs=20,batch_size=32,shuffle=True,verbose=1,validation_split=0.1)
#plot the loss and accuracy curves
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(np.array(history_cnn.history['loss']))
plt.plot(np.array(history_cnn.history['val_loss']))
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['loss','val_loss'])
plt.show()
plt.figure(2)
plt.plot(np.array(history_cnn.history['acc']))
plt.plot(np.array(history_cnn.history['val_acc']))
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['acc','val_acc'])
plt.show()

#save the model
cnn.save('model/cnn.h5')
#generate predictions on the test data
cnn = keras.models.load_model('model/cnn.h5')
test_out = cnn.predict(y_data)
#measure the model's accuracy on the test set
num = 0
total_num = y_data.shape[0]

for i in range(total_num):
    predict = np.argmax(test_out[i])
    if predict == y_label[i]:
        num += 1
accuracy = num/total_num
print(accuracy)

Training a CNN with PyTorch

Importing the libraries

The code is as follows:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import time
import os

Data preprocessing

The code is as follows:

#data preprocessing: augmentation plus normalization for the training set,
#normalization only for the test set
transform = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

transform1 = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

Loading the dataset

The transforms defined above are passed to the CIFAR10 constructors below, so they must exist first. The code is as follows:

#load the dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform1)
testloader = torch.utils.data.DataLoader(testset, batch_size=50,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
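A quick way to sanity-check the data pipeline is to pull a single batch from the loader and inspect its shape. This is a small interactive check using the loaders defined above (on Windows, code touching a DataLoader with num_workers > 0 should be run under an if __name__ == '__main__': guard):

images, labels = next(iter(trainloader))
print(images.shape)   #torch.Size([100, 3, 32, 32])
print(labels.shape)   #torch.Size([100])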

Defining the convolutional network architecture

The code is as follows:

class Net(nn.Module):
    def __init__(self):
        #define the layers of the network
        super(Net,self).__init__()
        self.conv1 = nn.Conv2d(3,64,3,padding=1)
        self.conv2 = nn.Conv2d(64,64,3,padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU()

        self.conv3 = nn.Conv2d(64,128,3,padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3,padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.ReLU()

        self.conv5 = nn.Conv2d(128,128, 3,padding=1)
        self.conv6 = nn.Conv2d(128, 128, 3,padding=1)
        self.conv7 = nn.Conv2d(128, 128, 1,padding=1)
        self.pool3 = nn.MaxPool2d(2, 2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()

        self.conv8 = nn.Conv2d(128, 256, 3,padding=1)
        self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv10 = nn.Conv2d(256, 256, 1, padding=1)
        self.pool4 = nn.MaxPool2d(2, 2, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.relu4 = nn.ReLU()

        self.conv11 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv12 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv13 = nn.Conv2d(512, 512, 1, padding=1)
        self.pool5 = nn.MaxPool2d(2, 2, padding=1)
        self.bn5 = nn.BatchNorm2d(512)
        self.relu5 = nn.ReLU()

        self.fc14 = nn.Linear(512*4*4,1024)
        self.drop1 = nn.Dropout()
        self.fc15 = nn.Linear(1024,1024)
        self.drop2 = nn.Dropout()
        self.fc16 = nn.Linear(1024,10)

The forward pass of the network

The code is as follows:

    #forward pass, i.e. the forward mode of the computation graph
    def forward(self,x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pool2(x)
        x = self.bn2(x)
        x = self.relu2(x)

        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.pool3(x)
        x = self.bn3(x)
        x = self.relu3(x)

        x = self.conv8(x)
        x = self.conv9(x)
        x = self.conv10(x)
        x = self.pool4(x)
        x = self.bn4(x)
        x = self.relu4(x)

        x = self.conv11(x)
        x = self.conv12(x)
        x = self.conv13(x)
        x = self.pool5(x)
        x = self.bn5(x)
        x = self.relu5(x)
        # print(" x shape ",x.size())
        x = x.view(-1,512*4*4)
        x = F.relu(self.fc14(x))
        x = self.drop1(x)
        x = F.relu(self.fc15(x))
        x = self.drop2(x)
        x = self.fc16(x)

        return x
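The 512*4*4 passed to view is the flattened size of the final feature maps: the 32x32 input is reduced to 4x4 by the five pooling stages (the padded pools and padded 1x1 convolutions slightly alter the intermediate sizes), with 512 channels after conv13. A quick way to verify the whole forward pass, assuming the Net class has been assembled from the __init__ and forward methods above (as in the full listing at the end), is to push a dummy input through it:

net = Net()
net.eval()                              #eval mode so BatchNorm uses its running statistics
with torch.no_grad():
    dummy = torch.zeros(1, 3, 32, 32)   #one fake CIFAR-10-sized image
    print(net(dummy).shape)             #expected: torch.Size([1, 10])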

Backpropagation and parameter updates

The code is as follows:

    #training loop and gradient updates (the method is named train_sgd, but it actually uses the Adam optimizer)
    def train_sgd(self,device):
        #define the Adam optimizer
        optimizer = optim.Adam(self.parameters(), lr=0.0001)

        path = 'weights.tar'
        initepoch = 0

        if not os.path.exists(path):
            #use the cross-entropy loss
            loss = nn.CrossEntropyLoss()
            # optimizer = optim.SGD(self.parameters(),lr=0.01)

        else:
            checkpoint = torch.load(path)
            self.load_state_dict(checkpoint['model_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            initepoch = checkpoint['epoch']
            loss = checkpoint['loss']

        #train the network
        for epoch in range(initepoch,100):  # loop over the dataset multiple times
            timestart = time.time()

            running_loss = 0.0
            total = 0
            correct = 0
            for i, data in enumerate(trainloader, 0):
                # get the inputs
                inputs, labels = data
                inputs, labels = inputs.to(device),labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = self(inputs)
                l = loss(outputs, labels)
                l.backward()
                optimizer.step()

                # print statistics
                running_loss += l.item()
                # print("i ",i)
                if i % 500 == 499:  # print every 500 mini-batches
                    print('[%d, %5d] loss: %.4f' %
                          (epoch, i, running_loss / 500))
                    running_loss = 0.0
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
                    print('Accuracy of the network on the %d train images: %.3f %%' % (total,
                                                                                      100.0 * correct / total))
                    total = 0
                    correct = 0
                    torch.save({'epoch':epoch,
                                'model_state_dict':self.state_dict(),
                                'optimizer_state_dict':optimizer.state_dict(),
                                'loss':loss
                                },path)

            print('epoch %d cost %.3f sec' %(epoch,time.time()-timestart))

        print('Finished Training')

Evaluation on the test set

The code is as follows:

    #evaluate on the test set and report the prediction accuracy
    def test(self,device):
        correct = 0
        total = 0
        with torch.no_grad():
            for data in testloader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = self(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        print('Accuracy of the network on the 10000 test images: %.3f %%' % (
                100.0 * correct / total))
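One caveat worth noting: since the network contains BatchNorm and Dropout layers, it is good practice to switch the module mode explicitly around evaluation. This is not done in the original code; a typical usage would look like the following sketch, assuming a Net instance called net on the chosen device as in the main block of the full listing below:

net.eval()            #BatchNorm uses running statistics, Dropout is disabled
net.test(device)
net.train()           #switch back to training mode before any further training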

Full code listing

The complete code is as follows:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import time
import os

#data preprocessing
transform = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

transform1 = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

#load the dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform1)
testloader = torch.utils.data.DataLoader(testset, batch_size=50,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


#define the convolutional neural network
class Net(nn.Module):
    def __init__(self):
        #define the layers of the network
        super(Net,self).__init__()
        self.conv1 = nn.Conv2d(3,64,3,padding=1)
        self.conv2 = nn.Conv2d(64,64,3,padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU()

        self.conv3 = nn.Conv2d(64,128,3,padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3,padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.ReLU()

        self.conv5 = nn.Conv2d(128,128, 3,padding=1)
        self.conv6 = nn.Conv2d(128, 128, 3,padding=1)
        self.conv7 = nn.Conv2d(128, 128, 1,padding=1)
        self.pool3 = nn.MaxPool2d(2, 2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()

        self.conv8 = nn.Conv2d(128, 256, 3,padding=1)
        self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv10 = nn.Conv2d(256, 256, 1, padding=1)
        self.pool4 = nn.MaxPool2d(2, 2, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.relu4 = nn.ReLU()

        self.conv11 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv12 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv13 = nn.Conv2d(512, 512, 1, padding=1)
        self.pool5 = nn.MaxPool2d(2, 2, padding=1)
        self.bn5 = nn.BatchNorm2d(512)
        self.relu5 = nn.ReLU()

        self.fc14 = nn.Linear(512*4*4,1024)
        self.drop1 = nn.Dropout()
        self.fc15 = nn.Linear(1024,1024)
        self.drop2 = nn.Dropout()
        self.fc16 = nn.Linear(1024,10)

    #forward pass, i.e. the forward mode of the computation graph
    def forward(self,x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pool2(x)
        x = self.bn2(x)
        x = self.relu2(x)

        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.pool3(x)
        x = self.bn3(x)
        x = self.relu3(x)

        x = self.conv8(x)
        x = self.conv9(x)
        x = self.conv10(x)
        x = self.pool4(x)
        x = self.bn4(x)
        x = self.relu4(x)

        x = self.conv11(x)
        x = self.conv12(x)
        x = self.conv13(x)
        x = self.pool5(x)
        x = self.bn5(x)
        x = self.relu5(x)
        # print(" x shape ",x.size())
        x = x.view(-1,512*4*4)
        x = F.relu(self.fc14(x))
        x = self.drop1(x)
        x = F.relu(self.fc15(x))
        x = self.drop2(x)
        x = self.fc16(x)

        return x

    #training loop and gradient updates (the method is named train_sgd, but it actually uses the Adam optimizer)
    def train_sgd(self,device):
        #define the Adam optimizer
        optimizer = optim.Adam(self.parameters(), lr=0.0001)

        path = 'weights.tar'
        initepoch = 0

        if not os.path.exists(path):
            #use the cross-entropy loss
            loss = nn.CrossEntropyLoss()
            # optimizer = optim.SGD(self.parameters(),lr=0.01)

        else:
            checkpoint = torch.load(path)
            self.load_state_dict(checkpoint['model_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            initepoch = checkpoint['epoch']
            loss = checkpoint['loss']

        #train the network
        for epoch in range(initepoch,100):  # loop over the dataset multiple times
            timestart = time.time()

            running_loss = 0.0
            total = 0
            correct = 0
            for i, data in enumerate(trainloader, 0):
                # get the inputs
                inputs, labels = data
                inputs, labels = inputs.to(device),labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = self(inputs)
                l = loss(outputs, labels)
                l.backward()
                optimizer.step()

                # print statistics
                running_loss += l.item()
                # print("i ",i)
                if i % 500 == 499:  # print every 500 mini-batches
                    print('[%d, %5d] loss: %.4f' %
                          (epoch, i, running_loss / 500))
                    running_loss = 0.0
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
                    print('Accuracy of the network on the %d train images: %.3f %%' % (total,
                                                                                      100.0 * correct / total))
                    total = 0
                    correct = 0
                    torch.save({'epoch':epoch,
                                'model_state_dict':self.state_dict(),
                                'optimizer_state_dict':optimizer.state_dict(),
                                'loss':loss
                                },path)

            print('epoch %d cost %.3f sec' %(epoch,time.time()-timestart))

        print('Finished Training')

    #evaluate on the test set and report the prediction accuracy
    def test(self,device):
        correct = 0
        total = 0
        with torch.no_grad():
            for data in testloader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = self(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        print('Accuracy of the network on the 10000 test images: %.3f %%' % (
                100.0 * correct / total))


if __name__ == '__main__':
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = Net()
    net = net.to(device)
    net.train_sgd(device)
    net.test(device)

Summary

As this example of using a CNN to classify the CIFAR-10 dataset shows, TensorFlow (with Keras) is friendlier and easier to follow for beginners, but because of its static graph computation it can be less efficient and less flexible for problems whose size and structure are not fixed in advance. PyTorch code is somewhat more verbose, though not hard to understand; its dynamic graph computation is very powerful, and its most distinctive feature, autograd automatic differentiation, is PyTorch's standout strength. If you plan to work in this area, I recommend learning both frameworks.
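To make the last point concrete, here is a minimal autograd sketch, independent of the CIFAR-10 example above: PyTorch records operations as they execute and can then differentiate through them automatically.

import torch

x = torch.tensor([2.0, 3.0], requires_grad=True)
y = (x ** 2).sum()    #y = x1^2 + x2^2
y.backward()          #autograd computes dy/dx automatically
print(x.grad)         #tensor([4., 6.]), i.e. 2*x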