Train a Graph Attention Network v2 (GATv2) on the Cora dataset

import torch
from torch import nn

from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.graphs.gat.experiment import Configs as GATConfigs
from labml_nn.graphs.gatv2 import GraphAttentionV2Layer

Graph Attention Network v2 (GATv2)

This graph attention network has two graph attention layers.
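For reference, a sketch of the attention scoring functions (notation follows the GATv2 paper, Brody et al., 2021): GAT applies the attention vector $\mathbf{a}$ after the non-linearity, which makes its attention static, while GATv2 moves $\mathbf{a}$ outside of it:

$$e_{ij} = \mathrm{LeakyReLU}\big(\mathbf{a}^\top [\mathbf{W} \vec{h}_i \,\Vert\, \mathbf{W} \vec{h}_j]\big) \qquad \text{(GAT)}$$

$$e_{ij} = \mathbf{a}^\top \mathrm{LeakyReLU}\big(\mathbf{W}_l \vec{h}_i + \mathbf{W}_r \vec{h}_j\big) \qquad \text{(GATv2)}$$

Here $\mathbf{W}_l$ and $\mathbf{W}_r$ transform the source and target nodes of an edge; the share_weights option below corresponds to setting $\mathbf{W}_l = \mathbf{W}_r$.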

class GATv2(Module):
  • in_features is the number of features per node
  • n_hidden is the number of features in the first graph attention layer
  • n_classes is the number of classes
  • n_heads is the number of heads in the graph attention layers
  • dropout is the dropout probability
  • share_weights, if set to True, applies the same weight matrix to the source and target nodes of every edge ($\mathbf{W}_l = \mathbf{W}_r$ in the scoring function above)
    def __init__(self, in_features: int, n_hidden: int, n_classes: int, n_heads: int, dropout: float,
                 share_weights: bool = True):
        super().__init__()

First graph attention layer where we concatenate the heads

        self.layer1 = GraphAttentionV2Layer(in_features, n_hidden, n_heads,
                                            is_concat=True, dropout=dropout, share_weights=share_weights)

Activation function after first graph attention layer

        self.activation = nn.ELU()

Final graph attention layer where we average the heads

        self.output = GraphAttentionV2Layer(n_hidden, n_classes, 1,
                                            is_concat=False, dropout=dropout, share_weights=share_weights)

Dropout

        self.dropout = nn.Dropout(dropout)
  • x is the feature vectors of shape [n_nodes, in_features]
  • adj_mat is the adjacency matrix of shape [n_nodes, n_nodes, n_heads] or [n_nodes, n_nodes, 1] (see the usage sketch after this class)
    def forward(self, x: torch.Tensor, adj_mat: torch.Tensor):

Apply dropout to the input

        x = self.dropout(x)

First graph attention layer

        x = self.layer1(x, adj_mat)

Activation function

        x = self.activation(x)

Dropout

        x = self.dropout(x)

Output layer (without activation) for logits

        return self.output(x, adj_mat)
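A minimal sketch of using the model on its own, assuming Cora's dimensions (1,433 bag-of-words features, 7 classes); the random graph here is only for checking shapes:

model = GATv2(in_features=1433, n_hidden=64, n_classes=7, n_heads=8, dropout=0.6)
n_nodes = 32

# Random node features
x = torch.randn(n_nodes, 1433)

# Random boolean adjacency matrix of shape [n_nodes, n_nodes, 1];
# self-loops keep every softmax row non-empty
adj_mat = torch.rand(n_nodes, n_nodes, 1) < 0.1
adj_mat = adj_mat | torch.eye(n_nodes, dtype=torch.bool).unsqueeze(-1)

# Per-node class logits
logits = model(x, adj_mat)
assert logits.shape == (n_nodes, 7)

In the actual experiment, the dataset, training loop, and model are wired together by the configurations below.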

Configurations

Since the experiment is the same as the GAT experiment, but with the GATv2 model, we extend the same configs and change only the model.

class Configs(GATConfigs):

Whether to share weights for source and target nodes of edges

    share_weights: bool = False

Set the model

    model: GATv2 = 'gat_v2_model'

Create GATv2 model

@option(Configs.model)
def gat_v2_model(c: Configs):
    return GATv2(c.in_features, c.n_hidden, c.n_classes, c.n_heads, c.dropout, c.share_weights).to(c.device)
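The @option decorator registers gat_v2_model as the calculator for Configs.model; when conf.model is first accessed, labml resolves the 'gat_v2_model' string above by calling this function.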
def main():

Create configurations

    conf = Configs()

Create an experiment

    experiment.create(name='gatv2')

Calculate configurations.

    experiment.configs(conf, {

Adam optimizer

        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 5e-3,
        'optimizer.weight_decay': 5e-4,

        'dropout': 0.7,
    })

Start and watch the experiment

    with experiment.start():

Run the training

        conf.run()

if __name__ == '__main__':
    main()
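Assuming this file lives at labml_nn/graphs/gatv2/experiment.py (mirroring the labml_nn.graphs.gat.experiment import above), it can be run directly, e.g. python -m labml_nn.graphs.gatv2.experiment.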