Define the network

```python
import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree


class GCNConv(MessagePassing):
    def __init__(self, in_channels, out_channels):
        super(GCNConv, self).__init__(aggr='add', flow='source_to_target')
        # "Add" aggregation (Step 5).
        # flow='source_to_target' means messages are propagated from source nodes to target nodes.
        self.lin = torch.nn.Linear(in_channels, out_channels)

    def forward(self, x, edge_index):
        # x has shape [N, in_channels]
        # edge_index has shape [2, E]

        # Step 1: Add self-loops to the adjacency matrix.
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        # Step 2: Linearly transform the node feature matrix.
        # In this experiment the feature dimension changes 1433 -> 16 -> 7.
        print(f'x.shape before the linear transform: {x.shape}')
        # print(f'x before the linear transform: {x}')
        x = self.lin(x)
        # print(f'x after the linear transform: {x}')
        print(f'x.shape after the linear transform: {x.shape}')

        # Step 3: Compute the normalization.
        row, col = edge_index
        # x.size(0) = 2708, i.e. the number of nodes.
        # degree() computes each node's degree; since the adjacency is symmetric,
        # the degrees obtained from row and from col are equal.
        deg = degree(col, x.size(0), dtype=x.dtype)
        print(f'row: {row}')
        print(f'col: {col}')
        print(f'x.size(0): {x.size(0)}')
        deg_inv_sqrt = deg.pow(-0.5)
        print(f'deg_inv_sqrt: {deg_inv_sqrt}')
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        print(f'norm: {norm}')

        # Step 4-5: Start propagating messages.
        return self.propagate(edge_index, x=x, norm=norm)

    def message(self, x_j, norm):
        # x_j has shape [E, out_channels]
        # Step 4: Normalize node features.
        return norm.view(-1, 1) * x_j
```
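
To see what Step 3 actually computes, here is a minimal sketch on a toy 3-node path graph (the graph is made up for illustration, not part of the original post): after adding self-loops, `norm` holds one coefficient 1/sqrt(deg(i) * deg(j)) per edge (i, j), which matches the entries of the symmetric normalization D̂^(-1/2) Â D̂^(-1/2) used by GCN.

```python
import torch
from torch_geometric.utils import add_self_loops, degree

# Toy undirected path graph 0 - 1 - 2 (both directions listed, as PyG expects).
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
num_nodes = 3

# Step 1: add self-loops, as in GCNConv.forward.
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)

# Step 3: per-edge normalization coefficients.
row, col = edge_index
deg = degree(col, num_nodes, dtype=torch.float)   # degrees incl. self-loops: [2., 3., 2.]
deg_inv_sqrt = deg.pow(-0.5)
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
print(norm)  # one coefficient 1/sqrt(deg(i) * deg(j)) per edge (i, j), self-loops included
```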

Initialization

Dataset

```python
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T

name_data = 'Cora'
dataset = Planetoid(root='./data/', name=name_data)
```
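
As a quick sanity check (a sketch, assuming the standard PyG Cora split), the dimensions that show up in the printed output below can be read directly from the dataset:

```python
data = dataset[0]
print(dataset.num_node_features)   # 1433 input features per node
print(dataset.num_classes)         # 7 classes
print(data.num_nodes)              # 2708 nodes
print(data.edge_index.shape)       # torch.Size([2, 10556]) directed edges, before self-loops
```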
```python
import torch.nn.functional as F


class Net(torch.nn.Module):
    # torch.nn.Module is the base class for all neural network modules.
    def __init__(self):
        # Run the parent class (nn.Module) initialization first.
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_node_features, 16)
        self.conv2 = GCNConv(16, dataset.num_classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
```
```python
net = Net()
out = net(dataset[0])
```
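
A minimal usage sketch (not from the original post; the model is untrained here, so the predictions are meaningless and this only checks shapes): `out` contains per-node log-probabilities, and an argmax over dim=1 gives a class index per node.

```python
print(out.shape)           # torch.Size([2708, 7]): one log-probability per class per node
pred = out.argmax(dim=1)   # predicted class index for each of the 2708 nodes
print(pred.shape)          # torch.Size([2708])
```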

Output

Because the network stacks two GCNConv layers, the printed shape information appears twice in the output.
x.size(0) = 2708, which is the number of nodes (num_nodes).
First layer: the feature dimension changes from 1433 → 16, caused by x = self.lin(x) in forward (where self.lin = torch.nn.Linear(in_channels, out_channels)).
Second layer: the feature dimension changes from 16 → 7.

row.shape: torch.Size([13264])
row and col both have shape [13264]: Cora's 10,556 (directed) edges plus the 2,708 self-loops added in Step 1.
deg.shape: torch.Size([2708]); deg contains the degree of each node.
