Extracting features from an intermediate layer

Getting the weights or features of an intermediate layer in PyTorch (happyday_d's blog)

PyTorch weight extraction: getting the weights or features of an intermediate layer …
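The linked posts boil down to two standard PyTorch mechanisms: read a layer's weights from state_dict(), and capture a layer's output features with a forward hook. A minimal sketch, assuming a torchvision resnet18 purely for illustration (the model and the layer names conv1/layer2 are not taken from the posts above):

    import torch
    import torchvision

    model = torchvision.models.resnet18()  # random weights, illustration only
    model.eval()

    # 1) Weights of a specific layer: read them from state_dict() (or the module itself)
    conv1_weight = model.state_dict()['conv1.weight']   # same tensor as model.conv1.weight.data
    print(conv1_weight.shape)                            # torch.Size([64, 3, 7, 7])

    # 2) Features of an intermediate layer: register a forward hook on that module
    features = {}

    def hook(module, inputs, output):
        features['layer2'] = output.detach()

    handle = model.layer2.register_forward_hook(hook)
    with torch.no_grad():
        model(torch.randn(1, 3, 224, 224))
    handle.remove()
    print(features['layer2'].shape)                      # torch.Size([1, 128, 28, 28])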

Visualizing per-layer feature maps
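A minimal visualization sketch to go with the hook above: plot the first few channels of a captured (C, H, W) feature map as grayscale images. matplotlib and the random tensor at the end are illustrative only.

    import torch
    import matplotlib.pyplot as plt

    def show_feature_maps(feat, num_maps=8):
        """Plot the first few channels of a (C, H, W) feature map as grayscale images."""
        feat = feat.detach().cpu()
        num_maps = min(num_maps, feat.shape[0])
        fig, axes = plt.subplots(1, num_maps, figsize=(2 * num_maps, 2))
        for i, ax in enumerate(axes):
            ax.imshow(feat[i].numpy(), cmap='gray')
            ax.set_title(f'ch {i}')
            ax.axis('off')
        plt.show()

    # e.g. with the hooked output from the sketch above:
    # show_feature_maps(features['layer2'][0])
    show_feature_maps(torch.randn(128, 28, 28))   # random tensor just to demonstrate the call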

Layer-wise learning rates

https://blog.csdn.net/Ming_LQ/article/details/113763667
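The standard mechanism behind layer-wise learning rates is optimizer parameter groups, each with its own lr. A minimal sketch, again assuming a torchvision resnet18 and placeholder rates (not values from the linked post):

    import torch
    import torchvision

    model = torchvision.models.resnet18()

    # Put the backbone and the final classifier in separate parameter groups,
    # each with its own learning rate (the new classifier head typically gets a larger one).
    backbone_params = [p for name, p in model.named_parameters() if not name.startswith('fc.')]
    optimizer = torch.optim.SGD(
        [
            {'params': backbone_params, 'lr': 1e-4},   # smaller lr for the backbone
            {'params': model.fc.parameters()},         # falls back to the default lr below
        ],
        lr=1e-3,
        momentum=0.9,
    )
    for group in optimizer.param_groups:
        print(group['lr'])   # 0.0001, 0.001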

CBAM

channel attention

'''
Description: https://www.yuque.com/huangzhongqing/lxph5a/mur8gs#o8sag
Author: HCQ
Company(School): UCAS
Email: 1756260160@qq.com
Date: 2021-10-19 10:47:15
LastEditTime: 2021-10-21 11:05:20
FilePath: /mmdetection3d/mmdet3d/models/necks/cbam.py
'''
import torch
import torch.nn as nn
import torchvision


# Global average-pool and max-pool the spatial dimensions down to 1x1, push both results
# through the same shared MLP, and add them to get the final channel attention vector (weights).
class ChannelAttentionModule(nn.Module):
    def __init__(self, channel, ratio=16):
        super(ChannelAttentionModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.shared_MLP = nn.Sequential(
            nn.Conv2d(channel, channel // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(channel // ratio, channel, 1, bias=False)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avgout = self.shared_MLP(self.avg_pool(x))  # torch.Size([1, 16, 1, 1])
        print('[Channel]avgout.shape {}'.format(avgout.shape))  # torch.Size([1, 16, 1, 1])
        maxout = self.shared_MLP(self.max_pool(x))  # torch.Size([1, 16, 1, 1])
        return self.sigmoid(avgout + maxout)  # torch.Size([1, 16, 1, 1])


class SpatialAttentionModule(nn.Module):
    def __init__(self):
        super(SpatialAttentionModule, self).__init__()
        self.conv2d = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avgout = torch.mean(x, dim=1, keepdim=True)          # channel-wise mean -> (B, 1, H, W)
        maxout, _ = torch.max(x, dim=1, keepdim=True)         # channel-wise max  -> (B, 1, H, W)
        out = torch.cat([avgout, maxout], dim=1)
        out = self.sigmoid(self.conv2d(out))
        return out


# CBAM: channel attention followed by spatial attention
class CBAM(nn.Module):
    def __init__(self, channel):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttentionModule(channel)  # channel attention
        self.spatial_attention = SpatialAttentionModule()         # spatial attention

    def forward(self, x):
        out = self.channel_attention(x) * x  # torch.Size([1, 16, 1, 1]) * torch.Size([1, 16, 64, 64])
        print('outchannels:{}'.format(out.shape))  # outchannels:torch.Size([1, 16, 64, 64])
        out = self.spatial_attention(out) * out  # torch.Size([1, 1, 64, 64]) * torch.Size([1, 16, 64, 64])
        return out


class ResBlock_CBAM(nn.Module):
    def __init__(self, in_places, places, stride=1, downsampling=False, expansion=4):
        super(ResBlock_CBAM, self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places * self.expansion),
        )
        self.cbam = CBAM(channel=places * self.expansion)  # build CBAM with the bottleneck's output channel count
        if self.downsampling:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(places * self.expansion)
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.bottleneck(x)
        print(x.shape)  # torch.Size([1, 16, 64, 64])
        out = self.cbam(out)  # apply CBAM
        if self.downsampling:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


model = ResBlock_CBAM(in_places=16, places=4)  # build and test the block
print(model)
input = torch.randn(1, 16, 64, 64)  # (B, C, H, W): mind the dimension order
out = model(input)
print('out.shape {}'.format(out.shape))  # torch.Size([1, 16, 64, 64])
# Run: python mmdet3d/models/necks/cbam.py
'''
ResBlock_CBAM(
  (bottleneck): Sequential(
    (0): Conv2d(16, 4, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    (4): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (5): ReLU(inplace=True)
    (6): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (7): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  )
  (cbam): CBAM(
    (channel_attention): ChannelAttentionModule(
      (avg_pool): AdaptiveAvgPool2d(output_size=1)
      (max_pool): AdaptiveMaxPool2d(output_size=1)
      (shared_MLP): Sequential(
        (0): Conv2d(16, 1, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (1): ReLU()
        (2): Conv2d(1, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
      )
      (sigmoid): Sigmoid()
    )
    (spatial_attention): SpatialAttentionModule(
      (conv2d): Conv2d(2, 1, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3))
      (sigmoid): Sigmoid()
    )
  )
  (relu): ReLU(inplace=True)
)
torch.Size([1, 16, 64, 64])
[Channel]avgout.shape torch.Size([1, 16, 1, 1])
outchannels:torch.Size([1, 16, 64, 64])
out.shape torch.Size([1, 16, 64, 64])
'''