# _make.py (forked from aimat-lab/gcnn_keras)
import tensorflow as tf
from kgcnn.layers.casting import ChangeTensorType
from kgcnn.layers.attention import AttentionHeadGAT
from kgcnn.layers.modules import LazyConcatenate, Dense, LazyAverage, Activation, \
OptionalInputEmbedding
from kgcnn.layers.mlp import GraphMLP, MLP
from kgcnn.layers.pooling import PoolingNodes
from kgcnn.model.utils import update_model_kwargs
ks = tf.keras

# Keep track of model version from commit date in literature.
# To be updated if model is changed in a significant way.
__model_version__ = "2022.11.25"

# Implementation of GAT in `tf.keras` from paper:
# Graph Attention Networks
# by Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, Yoshua Bengio (2018)
# https://arxiv.org/abs/1710.10903

model_default = {
"name": "GAT",
"inputs": [{"shape": (None,), "name": "node_attributes", "dtype": "float32", "ragged": True},
{"shape": (None,), "name": "edge_attributes", "dtype": "float32", "ragged": True},
{"shape": (None, 2), "name": "edge_indices", "dtype": "int64", "ragged": True}],
"input_embedding": {"node": {"input_dim": 95, "output_dim": 64},
"edge": {"input_dim": 5, "output_dim": 64}},
"attention_args": {"units": 32, "use_final_activation": False, "use_edge_features": True,
"has_self_loops": True, "activation": "kgcnn>leaky_relu", "use_bias": True},
"pooling_nodes_args": {"pooling_method": "mean"},
"depth": 3, "attention_heads_num": 5,
"attention_heads_concat": False, "verbose": 10,
"output_embedding": "graph", "output_to_tensor": True,
"output_mlp": {"use_bias": [True, True, False], "units": [25, 10, 1],
"activation": ["relu", "relu", "sigmoid"]}
}
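

# The ``update_model_kwargs`` decorator merges user-supplied keyword arguments
# into ``model_default`` before the function body runs, which is why every
# parameter below can default to ``None`` and still receive a value. A hedged
# sketch of the intended call pattern (assuming a nested dictionary update,
# as the decorator's name and the ``None`` defaults suggest):
#
#     model = make_model(depth=4, attention_heads_num=10)
#     # All other hyperparameters fall back to ``model_default``.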
@update_model_kwargs(model_default)
def make_model(inputs: list = None,
input_embedding: dict = None,
attention_args: dict = None,
pooling_nodes_args: dict = None,
depth: int = None,
attention_heads_num: int = None,
attention_heads_concat: bool = None,
name: str = None,
verbose: int = None,
output_embedding: str = None,
output_to_tensor: bool = None,
output_mlp: dict = None
):
r"""Make `GAT <https://arxiv.org/abs/1710.10903>`_ graph network via functional API.
Default parameters can be found in :obj:`kgcnn.literature.GAT.model_default`.
Inputs:
list: `[node_attributes, edge_attributes, edge_indices]`
- node_attributes (tf.RaggedTensor): Node attributes of shape `(batch, None, F)` or `(batch, None)`
using an embedding layer.
- edge_attributes (tf.RaggedTensor): Edge attributes of shape `(batch, None, F)` or `(batch, None)`
using an embedding layer.
- edge_indices (tf.RaggedTensor): Index list for edges of shape `(batch, None, 2)`.
Outputs:
tf.Tensor: Graph embeddings of shape `(batch, L)` if :obj:`output_embedding="graph"`.
Args:
inputs (list): List of dictionaries unpacked in :obj:`tf.keras.layers.Input`. Order must match model definition.
input_embedding (dict): Dictionary of embedding arguments for nodes etc. unpacked in :obj:`Embedding` layers.
attention_args (dict): Dictionary of layer arguments unpacked in :obj:`AttentionHeadGAT` layer.
pooling_nodes_args (dict): Dictionary of layer arguments unpacked in :obj:`PoolingNodes` layer.
depth (int): Number of graph embedding units or depth of the network.
attention_heads_num (int): Number of attention heads to use.
attention_heads_concat (bool): Whether to concat attention heads, or simply average heads.
name (str): Name of the model.
verbose (int): Level of print output.
output_embedding (str): Main embedding task for graph network. Either "node", "edge" or "graph".
output_to_tensor (bool): Whether to cast model output to :obj:`tf.Tensor`.
output_mlp (dict): Dictionary of layer arguments unpacked in the final classification :obj:`MLP` layer block.
Defines number of model outputs and activation.
Returns:
:obj:`tf.keras.models.Model`
"""
# Make input
node_input = ks.layers.Input(**inputs[0])
edge_input = ks.layers.Input(**inputs[1])
edge_index_input = ks.layers.Input(**inputs[2])
    # Optional embedding, applied when the input has no feature dimension
    # (i.e. the attributes are integer-like indices).
n = OptionalInputEmbedding(**input_embedding['node'],
use_embedding=len(inputs[0]['shape']) < 2)(node_input)
ed = OptionalInputEmbedding(**input_embedding['edge'],
use_embedding=len(inputs[1]['shape']) < 2)(edge_input)
edi = edge_index_input
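    # ``n``/``ed`` are the (possibly embedded) ragged node and edge features,
    # and ``edi`` holds per-graph node index pairs; all three stay ragged
    # tensors through the attention layers below.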
    # Model core: linearly project node features to the attention units, then
    # stack ``depth`` blocks of multi-head graph attention.
    nk = Dense(units=attention_args["units"], activation="linear")(n)
    for _ in range(depth):
        heads = [AttentionHeadGAT(**attention_args)([nk, ed, edi]) for _ in range(attention_heads_num)]
        if attention_heads_concat:
            nk = LazyConcatenate(axis=-1)(heads)
        else:
            # As in the GAT paper, averaged heads receive the non-linearity
            # after averaging rather than per head.
            nk = LazyAverage()(heads)
            nk = Activation(activation=attention_args["activation"])(nk)
    n = nk
    # Output embedding choice
    if output_embedding == 'graph':
        out = PoolingNodes(**pooling_nodes_args)(n)
        out = MLP(**output_mlp)(out)
    elif output_embedding == 'node':
        out = GraphMLP(**output_mlp)(n)
        if output_to_tensor:  # For tf versions < 2.8, cast the ragged output to a dense tensor.
            out = ChangeTensorType(input_tensor_type="ragged", output_tensor_type="tensor")(out)
    else:
        raise ValueError("Unsupported output embedding for `GAT`: '%s'." % output_embedding)
model = ks.models.Model(inputs=[node_input, edge_input, edge_index_input], outputs=out, name=name)
model.__kgcnn_model_version__ = __model_version__
return model
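

if __name__ == "__main__":
    # Minimal smoke test: a hedged sketch, not part of the upstream module. It
    # assumes a working kgcnn installation and uses the default configuration,
    # in which node and edge attributes of shape (batch, None) are treated as
    # indices for the embedding layers.
    model = make_model()
    model.summary()

    # A ragged batch of two tiny graphs: 3 nodes / 2 edges and 2 nodes / 1 edge.
    node_attributes = tf.ragged.constant([[0, 1, 2], [3, 4]], dtype="float32")
    edge_attributes = tf.ragged.constant([[0, 1], [2]], dtype="float32")
    edge_indices = tf.ragged.constant(
        [[[0, 1], [1, 2]], [[0, 1]]], ragged_rank=1, inner_shape=(2,), dtype="int64")

    # Forward pass; with the default ``output_embedding="graph"`` and an output
    # MLP ending in a single sigmoid unit, the result has shape (2, 1).
    prediction = model([node_attributes, edge_attributes, edge_indices])
    print(prediction)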