This repository is our implementation of
Hongyuan Zhang, Yanan Zhu, and Xuelong Li, "Decouple Graph Neural Networks: Train Multiple Simple GNNs Simultaneously Instead of One," IEEE Transactions on Pattern Analysis and Machine Intelligence (T-PAMI), vol. 46, no. 11, pp. 7451-7462, 2024. (arXiv) (IEEE)
SGNN attempts to further reduce the training complexity of each iteration compared with existing GNNs (the exact per-iteration complexity bounds are given in the paper).
Compared with other fast GNNs, SGNN can
- (Exact) compute representations exactly (without sampling);
- (Non-linear) use up to $L$ non-linear activations ($L$ is the number of layers);
- (Fast) be trained with the real stochastic (mini-batch based) optimization algorithms.
The comparison is summarized in the following table.
If you have issues, please email:
- pytorch 1.10.0
- scipy 1.3.1
- scikit-learn 0.21.3
- numpy 1.16.5
Please ensure the data is loaded correctly before running the scripts:
python run.py
python run_classfication.py
eta = 100, BP_count=5
layers = [
LayerParam(128, inner_act=linear_func, act=leaky_relu_func, gnn_type=LayerParam.EGCN,
learning_rate=10**-2, order=1, max_iter=60, lam=10**-3, batch_size=2708),
LayerParam(64, inner_act=linear_func, act=relu_func, gnn_type=LayerParam.EGCN,
learning_rate=10**-2, order=1, max_iter=60, lam=10**-3, batch_size=2708),
LayerParam(32, inner_act=linear_func, act=linear_func, gnn_type=LayerParam.EGCN,
learning_rate=0.01, order=2, max_iter=60, lam=10**-3, batch_size=140),
]
eta = 100, BP_count = 3
layers = [
LayerParam(256, inner_act=relu_func, act=leaky_relu_func, gnn_type=LayerParam.EGCN,
learning_rate=10**-2, order=1, max_iter=40, lam=10**-3, batch_size=1024),
LayerParam(128, inner_act=relu_func, act=linear_func, gnn_type=LayerParam.EGCN,
learning_rate=10**-3, order=1, max_iter=40, lam=10**-3, batch_size=140),
]
eta = 100, BP_count = 3
layers = [
LayerParam(256, inner_act=relu_func, act=leaky_relu_func, gnn_type=LayerParam.EGCN,
learning_rate=10**-2, order=1, max_iter=100, lam=10**-3, batch_size=4096*2),
LayerParam(128, inner_act=relu_func, act=leaky_relu_func, gnn_type=LayerParam.EGCN,
learning_rate=10**-4, order=2, max_iter=40, lam=10**-3, batch_size=2048),
]
eta = 1, BP_count = 10
layers = [
LayerParam(128, inner_act=linear_func, act=leaky_relu_func, gnn_type=LayerParam.GAE,
mask_rate=0.2, lam=lam, max_iter=max_iter, learning_rate=learning_rate,
batch_size=batch_size),
LayerParam(64, inner_act=linear_func, act=leaky_relu_func, gnn_type=LayerParam.GAE,
mask_rate=0.2, lam=lam, max_iter=max_iter, learning_rate=learning_rate,
batch_size=batch_size),
LayerParam(32, inner_act=linear_func, act=linear_func, gnn_type=LayerParam.GAE,
mask_rate=0.2, lam=lam, max_iter=max_iter, learning_rate=learning_rate,
batch_size=batch_size),
]
eta = 10, BP_count = 10
leaky_relu_func = Func(torch.nn.functional.leaky_relu, negative_slope=5.0)
layers = [
LayerParam(128, inner_act=linear_func, act=leaky_relu_func, gnn_type=LayerParam.GAE,
mask_rate=0.2, lam=lam, max_iter=max_iter, learning_rate=learning_rate,
batch_size=batch_size),
LayerParam(64, inner_act=linear_func, act=leaky_relu_func, gnn_type=LayerParam.GAE,
mask_rate=0.2, lam=lam, max_iter=max_iter, learning_rate=learning_rate,
batch_size=batch_size),
LayerParam(32, inner_act=linear_func, act=linear_func, gnn_type=LayerParam.GAE,
mask_rate=0.2, lam=lam, max_iter=max_iter, learning_rate=learning_rate,
batch_size=batch_size),
]
- mask_rate = 0.2
- overlook_rates=None
- layers=[256, 128]
- max_iter=200
- batch=256
- BP_count=5
- learning_rate=10^-4
- lam=10^-6
- eta=10, loss = loss1
- order=2
- AU -> leaky relu slope=0.2
- activation -> linear
- mask_rate = 0.2
- overlook_rates=None
- layers=[128, 64]
- max_iter=10000
- batch=512
- BP_count=5
- learning_rate=1e-4
- lam=10^-6
- eta=10, loss = loss1
- order=2
- inner_act -> relu
- activation -> linear
@article{SGNN,
author={Zhang, Hongyuan and Zhu, Yanan and Li, Xuelong},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Decouple Graph Neural Networks: Train Multiple Simple GNNs Simultaneously Instead of One},
year={2024},
volume={46},
number={11},
pages={7451--7462},
doi={10.1109/TPAMI.2024.3392782}
}