本文主要是介绍GAT学习:PyG实现GAT(自定义GAT层)网络(四),希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
PyG实现自定义GAT层
- 完整代码
本系列中的第三篇介绍了如何调用pyg封装好的GAT函数,当然同样的,我们需要学会如何自定义网络层以满足研究需求。
完整代码
import math
import ssl

import torch
import torch.nn.functional as F
from torch.nn import Parameter
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import glorot, zeros
from torch_geometric.utils import add_self_loops, remove_self_loops, softmax
import torch.nn.functional as F


class GATConv(MessagePassing):
    """Simplified graph attention layer (custom re-implementation of PyG's GATConv).

    Each node feature is projected to ``heads * out_channels``; a per-head
    attention logit is computed as ``(x * att).sum(-1)``.  NOTE(review): unlike
    the reference GAT, ``message`` uses only the source-node logit ``alpha_j``
    (no target-node term) — kept as-is to preserve the original behavior.
    """

    def __init__(self, in_channels, out_channels, heads: int = 1,
                 concat: bool = True, negative_slope: float = 0.2,
                 dropout: float = 0., add_self_loops: bool = True,
                 bias: bool = True, **kwargs):
        """Build the layer.

        Args:
            in_channels: size of each input node feature.
            out_channels: size of each output feature *per head*.
            heads: number of attention heads.
            concat: if True, concatenate head outputs; otherwise average them.
            negative_slope: LeakyReLU slope applied to attention logits.
            dropout: dropout probability on the attention coefficients.
            add_self_loops: whether to (re-)add self-loops to ``edge_index``.
            bias: whether to add a learnable additive bias.
        """
        # Sum neighbor messages unless the caller overrides the aggregation.
        kwargs.setdefault('aggr', 'add')
        super(GATConv, self).__init__(node_dim=0, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.add_self_loops = add_self_loops
        # Project input features so each head receives its own slice.
        self.lin = Linear(in_channels, heads * out_channels, bias=False)
        # Learnable attention vector, one per head: [1, heads, out_channels].
        self.att = Parameter(torch.Tensor(1, heads, out_channels))
        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self._alpha = None  # cache of the most recent attention coefficients
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize parameters: Glorot for weights/attention, zeros for bias."""
        glorot(self.lin.weight)
        glorot(self.att)
        zeros(self.bias)

    def forward(self, x, edge_index, return_attention_weights=None):
        """Run one attention propagation step.

        Returns ``out`` of shape [num_nodes, heads * out_channels] when
        ``concat`` else [num_nodes, out_channels]; if
        ``return_attention_weights`` is a bool, also returns
        ``(edge_index, alpha)``.
        """
        H, C = self.heads, self.out_channels
        x = self.lin(x).view(-1, H, C)      # [num_nodes, heads, out_channels]
        # Per-node, per-head attention logit: [num_nodes, heads].
        alpha = (x * self.att).sum(dim=-1)
        if self.add_self_loops:
            num_nodes = x.size(0)
            # Strip then re-add self-loops so each node attends to itself once.
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
        out = self.propagate(edge_index, x=x, alpha=alpha)
        # Retrieve the coefficients cached by message(), then clear the cache.
        alpha, self._alpha = self._alpha, None
        if self.concat:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)
        if self.bias is not None:
            out += self.bias
        if isinstance(return_attention_weights, bool):
            return out, (edge_index, alpha)
        return out

    def message(self, x_j, alpha_j, index):
        """Weight each source feature ``x_j`` by its normalized attention score.

        ``alpha_j`` has shape [num_edges, heads]; ``index`` holds the target
        node of each edge, used to softmax-normalize per target.
        """
        alpha = F.leaky_relu(alpha_j, self.negative_slope)
        alpha = softmax(alpha, index)
        self._alpha = alpha  # expose coefficients to forward()
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return x_j * alpha.unsqueeze(-1)


class Net(torch.nn.Module):
    """Two-layer GAT for Cora: 8 heads x 8 dims, then a 1-head 7-class output."""

    def __init__(self):
        super(Net, self).__init__()
        self.gat1 = GATConv(dataset.num_node_features, 8, 8, dropout=0.6)
        self.gat2 = GATConv(64, 7, 1, dropout=0.6)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.gat1(x, edge_index)
        x = self.gat2(x, edge_index)
        return F.log_softmax(x, dim=1)


dataset = Planetoid(root='Cora', name='Cora')
# Train on the train mask for a fixed number of epochs, then report test accuracy.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
data = dataset[0].to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
for epoch in range(100):
    optimizer.zero_grad()
    out = model(data)
    # NLL loss pairs with the model's log_softmax output; train split only.
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()

model.eval()
with torch.no_grad():  # no autograd tracking needed for evaluation
    _, pred = model(data).max(dim=1)
correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
acc = correct / int(data.test_mask.sum())
print('Accuracy:{:.4f}'.format(acc))
>>>Accuracy:0.7930
这篇关于GAT学习:PyG实现GAT(自定义GAT层)网络(四)的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!