Code on GitHub: https://github.com/clovaai/wsolevaluation

These are my study notes on the backbone structure used by ADL (Attention-based Dropout Layer).

Definition of the ResNet50 residual block (Bottleneck):

    import torch.nn as nn

    class Bottleneck(nn.Module):
        # Expansion factor for the output channels, e.g. 64 --> 256.
        expansion = 4

        def __init__(self, inplanes, planes, stride=1, downsample=None,
                     base_width=64):
            super(Bottleneck, self).__init__()
            # Three conv layers: 1x1, 3x3, 1x1. The final 1x1 conv expands the
            # output channels to planes * expansion (4x); spatial downsampling,
            # when requested, is handled by stride=stride in the 3x3 conv.
            width = int(planes * (base_width / 64.))
            self.conv1 = nn.Conv2d(inplanes, width, 1, bias=False)
            self.bn1 = nn.BatchNorm2d(width)
            self.conv2 = nn.Conv2d(width, width, 3,
                                   stride=stride, padding=1, bias=False)
            self.bn2 = nn.BatchNorm2d(width)
            self.conv3 = nn.Conv2d(width, planes * self.expansion, 1, bias=False)
            self.bn3 = nn.BatchNorm2d(planes * self.expansion)
            self.relu = nn.ReLU(inplace=True)
            self.downsample = downsample
            self.stride = stride

        def forward(self, x):
            identity = x

            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)

            out = self.conv3(out)
            out = self.bn3(out)

            # If the block changes the channel count, or downsamples spatially
            # (stride > 1, e.g. 2), the identity branch must be projected to
            # match; see get_downsampling_layer below for the projection.
            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity
            out = self.relu(out)
            return out
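
As a quick sanity check, here is a minimal sketch (the tensor shapes are my own choice, not from the repo) showing the 4x channel expansion and the stride-2 downsampling:

    import torch

    # Identity-branch projection: 64 -> 256 channels, stride 2, to match the
    # main branch (this mirrors what get_downsampling_layer builds, see below).
    downsample = nn.Sequential(
        nn.Conv2d(64, 256, 1, stride=2, bias=False),
        nn.BatchNorm2d(256),
    )
    block = Bottleneck(inplanes=64, planes=64, stride=2, downsample=downsample)
    x = torch.randn(2, 64, 56, 56)
    print(block(x).shape)  # torch.Size([2, 256, 28, 28]): channels x4, spatial /2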

Construction of the full ResNetAdl model:

    # In which stages, and after which blocks, the ADL modules are inserted.
    # The paper's ablations show that because deeper layers have larger
    # receptive fields per feature-map pixel, the region erased by the
    # drop_mask also grows with depth.
    _ADL_POSITION = [[], [], [], [0], [0, 2]]
    # ADL and initialize_weights are defined elsewhere in the repo.
    class ResNetAdl(nn.Module):
        def __init__(self, block, layers, num_classes=1000,
                     large_feature_map=False, **kwargs):
            super(ResNetAdl, self).__init__()
            # large_feature_map=True  -> self.stride_l3 = 1
            # large_feature_map=False -> self.stride_l3 = 2
            # With stride 1 the feature map stays larger; the original ResNet
            # code remarks on this trade-off: 'it is slightly better whereas
            # slower to set stride = 1'.
            self.stride_l3 = 1 if large_feature_map else 2

            self.inplanes = 64
            # The two ADL hyperparameters.
            self.adl_drop_rate = kwargs['adl_drop_rate']
            self.adl_threshold = kwargs['adl_drop_threshold']

            # The 7x7 conv here could be replaced by three 3x3 convs to reduce
            # information loss.
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2,
                                   padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(self.inplanes)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

            self.layer1 = self._make_layer(block, 64, layers[0],
                                           stride=1,
                                           split=_ADL_POSITION[1])
            self.layer2 = self._make_layer(block, 128, layers[1],
                                           stride=2,
                                           split=_ADL_POSITION[2])
            self.layer3 = self._make_layer(block, 256, layers[2],
                                           stride=self.stride_l3,
                                           split=_ADL_POSITION[3])
            # it is slightly better whereas slower to set stride = 1
            self.layer4 = self._make_layer(block, 512, layers[3],
                                           stride=1,
                                           split=_ADL_POSITION[4])

            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * block.expansion, num_classes)
            initialize_weights(self.modules(), init_mode='xavier')
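
To make the effect of stride_l3 concrete, here is a rough spatial-size bookkeeping sketch (assuming a 224x224 input, which is my assumption rather than something stated in this excerpt):

    for large_feature_map in (True, False):
        stride_l3 = 1 if large_feature_map else 2
        size = 224 // 2 // 2   # conv1 (stride 2), then maxpool (stride 2): 56
        size //= 1             # layer1, stride 1: 56
        size //= 2             # layer2, stride 2: 28
        size //= stride_l3     # layer3: 28 if large_feature_map else 14
        print(large_feature_map, size)  # layer4 keeps stride 1, so this is final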
The forward pass, including the CAM branch:

        def forward(self, x, labels=None, return_cam=False):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)

            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)

            pre_logit = self.avgpool(x)
            pre_logit = pre_logit.reshape(pre_logit.size(0), -1)
            logits = self.fc(pre_logit)

            if return_cam:
                # detach() cuts the tensor out of the computation graph
                # (requires_grad=False) while still sharing storage with x;
                # clone() then copies it, so the stage-4 feature map is saved
                # as an independent tensor.
                feature_map = x.detach().clone()
                # Pick the fc weight vector belonging to each image's label.
                cam_weights = self.fc.weight[labels]
                # Channel-wise weighted mean of the feature map -> one CAM
                # per image.
                cams = (cam_weights.view(*feature_map.shape[:2], 1, 1) *
                        feature_map).mean(1, keepdim=False)
                return cams
            return {'logits': logits}
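
The CAM arithmetic in isolation, as a minimal sketch (all shapes are illustrative assumptions: batch 2, 2048 channels, 14x14 maps, 1000 classes):

    import torch

    feature_map = torch.randn(2, 2048, 14, 14)  # stage-4 output (assumed shape)
    fc_weight = torch.randn(1000, 2048)         # stands in for self.fc.weight
    labels = torch.tensor([3, 7])               # one class index per image

    cam_weights = fc_weight[labels]             # (2, 2048)
    cams = (cam_weights.view(2, 2048, 1, 1) * feature_map).mean(1)
    print(cams.shape)  # torch.Size([2, 14, 14]): one localization map per image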
Stage construction: _layer stacks the residual blocks, and _make_layer splices the ADL modules in afterwards:

        def _make_layer(self, block, planes, blocks, stride, split=None):
            # layers is a plain Python list of the stage's modules.
            layers = self._layer(block, planes, blocks, stride)
            # Insert an ADL module after each listed position; for stage 4
            # that is after the first and the third residual block. Iterating
            # in reverse keeps the earlier indices valid while inserting.
            for pos in reversed(split):
                layers.insert(pos + 1, ADL(self.adl_drop_rate, self.adl_threshold))
            return nn.Sequential(*layers)

        def _layer(self, block, planes, blocks, stride):
            downsample = get_downsampling_layer(self.inplanes, block, planes,
                                                stride)
            # Only the first block of a stage may change stride/channels.
            layers = [block(self.inplanes, planes, stride, downsample)]
            self.inplanes = planes * block.expansion
            for _ in range(1, blocks):
                layers.append(block(self.inplanes, planes))
            return layers
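
A toy illustration (the list contents are placeholders of my own, not real modules) of why the insertion loop runs over reversed(split):

    stage4 = ['block0', 'block1', 'block2']
    for pos in reversed([0, 2]):             # _ADL_POSITION[4]
        stage4.insert(pos + 1, 'ADL')
    print(stage4)  # ['block0', 'ADL', 'block1', 'block2', 'ADL']
    # Inserting at the later position first means the earlier index is still
    # valid when its turn comes.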
Finally, the module-level helper that builds the identity-branch projection:

    def get_downsampling_layer(inplanes, block, planes, stride):
        outplanes = planes * block.expansion
        if stride == 1 and inplanes == outplanes:
            # Shapes already match; no projection needed.
            return
        else:
            return nn.Sequential(
                nn.Conv2d(inplanes, outplanes, 1, stride, bias=False),
                nn.BatchNorm2d(outplanes),
            )
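
A quick check of the two cases (assumes the Bottleneck and get_downsampling_layer definitions above):

    # First block of a stage that changes nothing: no projection.
    print(get_downsampling_layer(256, Bottleneck, 64, stride=1))   # None
    # Channel count changes (256 -> 512): a 1x1 conv + BN projection.
    print(get_downsampling_layer(256, Bottleneck, 128, stride=2))  # Sequential(...)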

Overall it is just a ResNet with a few small modifications, so it should be easy to follow.