4.1 Model Construction
import torch
from torch import nn

print(torch.__version__)
1.1.0
4.1.1 Building a Model by Subclassing the Module Class
Module is a model-construction class provided by the torch.nn package and the base class of all neural network modules. We can subclass it to define the model we want, overriding __init__ and forward.
class MLP(nn.Module):
    # Declare layers with model parameters; here, two fully connected layers
    def __init__(self, **kwargs):
        # Call the constructor of the parent class Module to perform the
        # necessary initialization, so that other arguments can still be
        # passed when constructing an instance
        super(MLP, self).__init__(**kwargs)
        self.hidden = nn.Linear(784, 256)  # hidden layer
        self.act = nn.ReLU()
        self.output = nn.Linear(256, 10)  # output layer

    # Define the forward computation, i.e. how to compute the required model
    # output from the input x
    def forward(self, x):
        a = self.act(self.hidden(x))
        return self.output(a)
X = torch.rand(2, 784)
net = MLP()
print(net)
net(X)
"""
MLP(
  (hidden): Linear(in_features=784, out_features=256, bias=True)
  (act): ReLU()
  (output): Linear(in_features=256, out_features=10, bias=True)
)
tensor([[ 0.0234, -0.2646, -0.1168, -0.2127,  0.0884, -0.0456,  0.0811,  0.0297,
          0.2032,  0.1364],
        [ 0.1479, -0.1545, -0.0265, -0.2119, -0.0543, -0.0086,  0.0902, -0.1017,
          0.1504,  0.1144]], grad_fn=<AddmmBackward>)
"""
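Note that net(X) does not call forward directly: Module.__call__ dispatches to forward and also runs any registered hooks, which is why we call the instance rather than forward. A minimal check, continuing the names above:

# net(X) goes through Module.__call__, which dispatches to forward(), so the
# two results below are identical; prefer net(X) in practice, since __call__
# also runs registered hooks
out1 = net(X)
out2 = net.forward(X)
print(torch.equal(out1, out2))  # True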
4.1.2 Subclasses of Module
4.1.2.1 The Sequential Class
When a model's forward computation is just a plain chain through its layers, the Sequential class offers a simpler way to define the model. To see how Sequential works, we implement a MySequential class with the same functionality below.
from collections import OrderedDict

class MySequential(nn.Module):
    def __init__(self, *args):
        super(MySequential, self).__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):  # an OrderedDict was passed in
            for key, module in args[0].items():
                # key is the module's name, module is the module itself;
                # add_module adds the module to self._modules (an OrderedDict)
                self.add_module(key, module)
        else:  # some individual Modules were passed in
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)

    def forward(self, input):
        # self._modules is an OrderedDict, which guarantees that members are
        # traversed in the order they were added
        for module in self._modules.values():
            input = module(input)
        return input
net = MySequential(
    nn.Linear(784, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)
print(net)
net(X)
# output
"""
MySequential(
  (0): Linear(in_features=784, out_features=256, bias=True)
  (1): ReLU()
  (2): Linear(in_features=256, out_features=10, bias=True)
)
tensor([[ 0.1273,  0.1642, -0.1060,  0.1401,  0.0609, -0.0199, -0.0140, -0.0588,
          0.1765, -0.1296],
        [ 0.0267,  0.1670, -0.0626,  0.0744,  0.0574,  0.0413,  0.1313, -0.1479,
          0.0932, -0.0615]], grad_fn=<AddmmBackward>)
"""
net = MySequential(OrderedDict([
    ("Linear_1", nn.Linear(784, 256)),
    ("relu_1", nn.ReLU()),
    ("Linear_2", nn.Linear(256, 10)),
]))
print(net)
"""
MySequential(
  (Linear_1): Linear(in_features=784, out_features=256, bias=True)
  (relu_1): ReLU()
  (Linear_2): Linear(in_features=256, out_features=10, bias=True)
)
"""
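The built-in nn.Sequential accepts the same two calling conventions, so MySequential is essentially a simplified version of it. A short sketch:

# nn.Sequential also accepts an OrderedDict of named submodules; the names
# then appear in print(net) and are accessible as attributes
net_builtin = nn.Sequential(OrderedDict([
    ("Linear_1", nn.Linear(784, 256)),
    ("relu_1", nn.ReLU()),
    ("Linear_2", nn.Linear(256, 10)),
]))
print(net_builtin.Linear_1)  # Linear(in_features=784, out_features=256, bias=True)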
4.1.2.2 The ModuleList Class
ModuleList takes a list of submodules as input and then supports list-like append and extend operations:
net = nn.ModuleList([nn.Linear(784, 256), nn.ReLU()])
net.append(nn.Linear(256, 10))  # list-like append
print(net[-1])  # list-like indexing
print(net)
# net(torch.zeros(1, 784))  # would raise NotImplementedError
"""
Linear(in_features=256, out_features=10, bias=True)
ModuleList(
  (0): Linear(in_features=784, out_features=256, bias=True)
  (1): ReLU()
  (2): Linear(in_features=256, out_features=10, bias=True)
)
"""
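ModuleList defines no forward, which is why calling net on an input raises NotImplementedError: it only holds modules. The extend operation mirrors list.extend, as in this minimal sketch:

# extend appends every module from an iterable, like list.extend
net2 = nn.ModuleList([nn.Linear(784, 256)])
net2.extend([nn.ReLU(), nn.Linear(256, 10)])
print(len(net2))  # 3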
class MyModule(nn.Module):
    def __init__(self):
        super(MyModule, self).__init__()
        self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])

    def forward(self, x):
        # ModuleList can act as an iterable, or be indexed using ints
        for i, l in enumerate(self.linears):
            x = self.linears[i // 2](x) + l(x)
        return x
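A quick check of MyModule (the batch size here is our choice; any batch of 10-dimensional vectors works, since every layer maps 10 features to 10):

# each of the 10 layers maps 10 -> 10 features, so a (batch, 10) input works
m = MyModule()
y = m(torch.rand(4, 10))
print(y.shape)  # torch.Size([4, 10])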
class Module_ModuleList(nn.Module):
    def __init__(self):
        super(Module_ModuleList, self).__init__()
        self.linears = nn.ModuleList([nn.Linear(10, 10)])

class Module_List(nn.Module):
    def __init__(self):
        super(Module_List, self).__init__()
        self.linears = [nn.Linear(10, 10)]  # a plain Python list: not registered

net1 = Module_ModuleList()
net2 = Module_List()

print("net1:")
for p in net1.parameters():
    print(p.size())

print("net2:")
for p in net2.parameters():
    print(p)

# net2 prints nothing: modules kept in a plain list are not registered as
# submodules, so their parameters are invisible to parameters()
"""
net1:
torch.Size([10, 10])
torch.Size([10])
net2:
"""
4.1.2.3 The ModuleDict Class
ModuleDict takes a dictionary of submodules as input and then supports dictionary-like addition and access operations:
net = nn.ModuleDict({
    'linear': nn.Linear(784, 256),
    'act': nn.ReLU(),
})
net['output'] = nn.Linear(256, 10)  # add
print(net['linear'])  # access
print(net.output)
print(net)
# net(torch.zeros(1, 784))  # would raise NotImplementedError
"""
Linear(in_features=784, out_features=256, bias=True)
Linear(in_features=256, out_features=10, bias=True)
ModuleDict(
  (act): ReLU()
  (linear): Linear(in_features=784, out_features=256, bias=True)
  (output): Linear(in_features=256, out_features=10, bias=True)
)
"""
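Like ModuleList, ModuleDict defines no forward, so it is normally used inside a custom Module. A minimal sketch reusing the keys above (the class name DictMLP is ours):

class DictMLP(nn.Module):
    def __init__(self):
        super(DictMLP, self).__init__()
        self.layers = nn.ModuleDict({
            'linear': nn.Linear(784, 256),
            'act': nn.ReLU(),
            'output': nn.Linear(256, 10),
        })

    def forward(self, x):
        # wire the registered submodules together by key
        return self.layers['output'](self.layers['act'](self.layers['linear'](x)))

print(DictMLP()(torch.zeros(1, 784)).shape)  # torch.Size([1, 10])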
4.1.3 Constructing Complex Models
Although the classes introduced above make model construction simpler without requiring us to define a forward function, subclassing Module directly greatly extends the flexibility of model construction. Below we construct a slightly more complex network, FancyMLP. In this network, we create a constant parameter, i.e. a parameter that is not updated during training. In the forward computation, besides using this constant parameter, we also use Tensor functions and Python control flow, and call the same layer multiple times.
class FancyMLP(nn.Module):
    def __init__(self, **kwargs):
        super(FancyMLP, self).__init__(**kwargs)
        # non-trainable parameter (a constant parameter)
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, x):
        x = self.linear(x)
        # use the constant parameter, plus the relu and mm functions from
        # nn.functional / torch
        x = nn.functional.relu(torch.mm(x, self.rand_weight.data) + 1)

        # reuse the fully connected layer; equivalent to two layers sharing parameters
        x = self.linear(x)
        # control flow; item() returns a Python scalar for the comparisons
        while x.norm().item() > 1:
            x /= 2
        if x.norm().item() < 0.8:
            x *= 10
        return x.sum()
X = torch.rand(2, 20)
net = FancyMLP()
print(net)
net(X)
"""
FancyMLP(
  (linear): Linear(in_features=20, out_features=20, bias=True)
)
tensor(0.8907, grad_fn=<SumBackward0>)
"""
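Because rand_weight is stored as a plain tensor attribute, it does not move with net.to(device) and is not saved in the state_dict. A variant sketch (our suggestion, not the book's code) registers the constant as a buffer instead:

class FancyMLPBuffer(nn.Module):
    def __init__(self):
        super(FancyMLPBuffer, self).__init__()
        # a buffer is non-trainable but moves with .to(device) and is saved
        # in state_dict, unlike a plain tensor attribute
        self.register_buffer('rand_weight', torch.rand(20, 20))
        self.linear = nn.Linear(20, 20)

    def forward(self, x):
        x = nn.functional.relu(torch.mm(self.linear(x), self.rand_weight) + 1)
        return self.linear(x).sum()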
class NestMLP(nn.Module):
    def __init__(self, **kwargs):
        super(NestMLP, self).__init__(**kwargs)
        self.net = nn.Sequential(nn.Linear(40, 30), nn.ReLU())

    def forward(self, x):
        return self.net(x)

net = nn.Sequential(NestMLP(), nn.Linear(30, 20), FancyMLP())

X = torch.rand(2, 40)
print(net)
net(X)
"""
Sequential(
  (0): NestMLP(
    (net): Sequential(
      (0): Linear(in_features=40, out_features=30, bias=True)
      (1): ReLU()
    )
  )
  (1): Linear(in_features=30, out_features=20, bias=True)
  (2): FancyMLP(
    (linear): Linear(in_features=20, out_features=20, bias=True)
  )
)
tensor(-0.4605, grad_fn=<SumBackward0>)
"""
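Nesting registers everything recursively: net.parameters() sees the parameters of NestMLP's inner Sequential, the middle Linear, and FancyMLP's reused linear, which is counted once since reuse shares a single weight matrix. A quick sanity check:

# 40*30+30 (NestMLP) + 30*20+20 (middle Linear) + 20*20+20 (FancyMLP) = 2270;
# FancyMLP's linear is counted once even though forward() calls it twice
print(sum(p.numel() for p in net.parameters()))  # 2270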
