-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain_transductive_mv.py
109 lines (81 loc) · 2.87 KB
/
main_transductive_mv.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
# Standard library first, then the sys.path tweak, then third-party and
# project-local imports.
# NOTE(review): in the original file the sys.path.append ran *after* the
# psnrgnn imports, so it could never make psnrgnn importable; reordered so
# the path adjustment takes effect before the project imports.
import os
import shutil
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

import numpy as np
import pdb  # kept from original; presumably used for interactive debugging
import torch
import torch.nn.functional as F
from tqdm import tqdm

from psnrgnn.utils import (
    create_optimizer,
    set_random_seed,
    accuracy,
    evaluate,
    p,
)
from psnrgnn.datasets.dataset import load_dataset, split_datasets
from psnrgnn.models import BuildModel
def train(model, graph, optimizer, max_epoch, if_early_stop):
    """Train ``model`` transductively on ``graph`` under the missing-vector setting.

    Node features of validation and test nodes are zeroed in place before
    training (the "Missing Vector" setting), so the model never sees them.
    Runs for at most ``max_epoch`` epochs; when ``if_early_stop`` is truthy,
    training stops after the patience counter exceeds 100 non-improving epochs.

    NOTE(review): the patience counter is never reset when validation
    accuracy improves, so it accumulates over the whole run — confirm this
    is intended rather than per-improvement patience.

    Returns:
        (best_val_acc, test_acc_at_best_val)
    """
    feats = graph.ndata["feat"]
    labels = graph.ndata['label']
    train_mask = graph.ndata['train_mask']
    val_mask = graph.ndata['val_mask']
    test_mask = graph.ndata['test_mask']

    # Missing Vector Setting: blank out every non-training node's features.
    # This mutates the graph's feature tensor in place.
    feats[val_mask] = 0
    feats[test_mask] = 0

    best_val = 0
    test_at_best = 0
    stall = 0
    progress = tqdm(range(max_epoch))
    for epoch in progress:
        model.train()
        logits = model(graph, feats)
        loss_train = F.nll_loss(logits[train_mask], labels[train_mask])
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()

        # Evaluation pass — no gradients needed.
        with torch.no_grad():
            acc_train = accuracy(logits[train_mask], labels[train_mask])
            acc_val = evaluate(model, graph, feats, val_mask)
            acc_test = evaluate(model, graph, feats, test_mask)

        if acc_val > best_val:
            # Track the test accuracy observed at the best validation epoch.
            best_val = acc_val
            test_at_best = acc_test
        else:
            stall += 1
            if stall > 100 and if_early_stop:
                break
        progress.set_description(f"# Epoch {epoch}: train_loss: {loss_train.item():.4f} train_acc: {acc_train:.4f} val_acc: {acc_val:.4f} test_acc: {acc_test:.4f} final_acc: {test_at_best:.4f} ")
    return best_val, test_at_best
def main(args):
    """Run transductive missing-vector training once per seed and aggregate.

    Loads the dataset named by ``args.dataset``, then for every seed in
    ``args.seeds`` builds a fresh model, trains it via :func:`train`, and
    records the best validation accuracy and its associated test accuracy.

    Returns:
        (test_mean, test_std, val_mean, val_std) as numpy floats.
    """
    device = "cuda:" + str(args.device) if args.device >= 0 else "cpu"
    graph, (num_features, num_classes, num_node) = load_dataset(args.dataset, args)
    args.num_features = num_features
    graph = graph.to(device)

    acc_list = []
    val_acc_list = []
    for seed in args.seeds:
        set_random_seed(seed)
        model = BuildModel(
            args.backbone,
            num_features,
            args.n_hid,
            num_classes,
            args.n_layers,
            args.activation,
            args.norm,
            args.drop,
            args.residual_type,
            num_node,
        ).build(args)
        model = model.to(device)
        optimizer = create_optimizer(args.optimizer, model, args.lr, args.weight_decay)
        best_acc, final_acc = train(model, graph, optimizer, args.max_epoch, args.if_early_stop)
        # float() converts both 0-dim tensors and the plain int 0 that train()
        # returns when validation accuracy never improves; the original
        # unconditional .cpu() raised AttributeError in that case.
        val_acc_list.append(float(best_acc))
        acc_list.append(float(final_acc))

    final_acc, final_acc_std = np.mean(acc_list), np.std(acc_list)
    val_acc, val_acc_std = np.mean(val_acc_list), np.std(val_acc_list)
    return final_acc, final_acc_std, val_acc, val_acc_std