Using the official (built-in) GAT: https://blog.csdn.net/StarfishCu/article/details/109644271 (GAT learning: implementing GAT with PyG's built-in GATConv, Part 3)
A from-scratch GAT implementation: https://blog.csdn.net/StarfishCu/article/details/109237526 (GAT learning: implementing GAT, a graph attention network, with PyG, Part 1) — a minimal sketch of the attention it computes follows after these links
PyG tutorial: https://colab.research.google.com/github/AntonioLonga/PytorchGeometricTutorial/blob/main/Tutorial3/Tutorial3.ipynb
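For context, here is a minimal, hypothetical sketch of the single-head attention that the from-scratch implementation in the second link computes: e_ij = LeakyReLU(a^T [W h_i || W h_j]), normalised with a softmax over each node's neighbours. It uses a dense adjacency matrix for brevity; the names gat_attention, W, a are placeholders of mine, not taken from those posts.

import torch
import torch.nn.functional as F

def gat_attention(h, adj, W, a, leaky_slope=0.2):
    # h:   [N, F_in]     node features
    # adj: [N, N]        1 where an edge (including self-loops) exists, else 0
    # W:   [F_in, F_out] shared linear transform
    # a:   [2 * F_out]   attention vector
    Wh = h @ W                                   # [N, F_out]
    f_src = Wh @ a[:Wh.size(1)]                  # [N] contribution of node i
    f_dst = Wh @ a[Wh.size(1):]                  # [N] contribution of node j
    # e_ij = LeakyReLU(a^T [Wh_i || Wh_j]) for every pair (i, j)
    e = F.leaky_relu(f_src.unsqueeze(1) + f_dst.unsqueeze(0), negative_slope=leaky_slope)
    e = e.masked_fill(adj == 0, float('-inf'))   # attend only over neighbours
    alpha = torch.softmax(e, dim=1)              # normalise per target node
    return alpha @ Wh                            # [N, F_out] aggregated features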
Using PyG's built-in GATConv
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch_geometric.transforms as T
from torch_geometric.data import Data
from torch_geometric.nn import GATConv
from torch_geometric.datasets import Planetoid

# Load the Cora citation graph and row-normalise its node features.
name_data = 'Cora'
dataset = Planetoid(root='/tmp/' + name_data, name=name_data)
dataset.transform = T.NormalizeFeatures()

print(f"Number of Classes in {name_data}:", dataset.num_classes)
print(f"Number of Node Features in {name_data}:", dataset.num_node_features)
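As an optional sanity check (not in the original post), the loaded Data object can be inspected directly; the numbers in the comments assume the standard Planetoid split of Cora.

data = dataset[0]                     # Cora is a single graph
print(data)                           # Data(x=[2708, 1433], edge_index=[2, 10556], y=[2708], ...)
print(data.num_nodes)                 # 2708 papers (nodes)
print(data.num_edges)                 # 10556 directed edges (each citation stored in both directions)
print(data.train_mask.sum().item())   # 140 labelled training nodes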
class GAT(torch.nn.Module):
    def __init__(self):
        super(GAT, self).__init__()
        self.hid = 8        # hidden features per attention head
        self.in_head = 8    # attention heads in the first layer
        self.out_head = 1   # attention heads in the output layer

        # First layer: multi-head attention; heads are concatenated (default concat=True),
        # so it outputs hid * in_head features per node.
        self.conv1 = GATConv(dataset.num_features, self.hid,
                             heads=self.in_head, dropout=0.6)
        # Output layer: concat=False averages the heads, giving one score per class.
        self.conv2 = GATConv(self.hid * self.in_head, dataset.num_classes,
                             concat=False, heads=self.out_head, dropout=0.6)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        x = F.dropout(x, p=0.6, training=self.training)
        x = self.conv1(x, edge_index)
        x = F.elu(x)
        x = F.dropout(x, p=0.6, training=self.training)
        x = self.conv2(x, edge_index)

        return F.log_softmax(x, dim=1)
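The second layer takes self.hid * self.in_head input features because GATConv concatenates the head outputs by default (concat=True), whereas concat=False averages them. A tiny shape check on toy tensors illustrates this (my illustration, not part of the original code; it reuses the imports above):

x = torch.randn(4, 16)                         # 4 nodes with 16 features
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])      # toy edge list
print(GATConv(16, 8, heads=8)(x, edge_index).shape)                # torch.Size([4, 64]): 8 heads concatenated
print(GATConv(16, 8, heads=8, concat=False)(x, edge_index).shape)  # torch.Size([4, 8]): heads averaged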
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = "cpu"  # the original notebook forces CPU; remove this line to use the GPU
model = GAT().to(device)
data = dataset[0].to(device)

optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)

model.train()
for epoch in range(1000):
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    if epoch % 200 == 0:
        print(loss)
    loss.backward()
    optimizer.step()
# Accuracy on the held-out test nodes.
model.eval()
_, pred = model(data).max(dim=1)
correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
acc = correct / data.test_mask.sum().item()
print('Accuracy: {:.4f}'.format(acc))
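A possible variant of the evaluation above (my own sketch, not from the original post) that reports accuracy on all three Planetoid masks and skips gradient tracking:

model.eval()
with torch.no_grad():
    pred = model(data).argmax(dim=1)
    for split in ['train_mask', 'val_mask', 'test_mask']:
        mask = data[split]
        acc = (pred[mask] == data.y[mask]).float().mean().item()
        print(f'{split} accuracy: {acc:.4f}')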
