-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_edges.py
More file actions
139 lines (110 loc) · 4.96 KB
/
train_edges.py
File metadata and controls
139 lines (110 loc) · 4.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
import argparse
import numpy as np
# from utils import time_logger
class EdgePredictor(nn.Module):
    """Binary classifier scoring whether an edge exists between two nodes.

    Input is the concatenation of two node embeddings (length
    ``embedding_dim * 2``); output is a probability in ``[0, 1]``.
    """

    def __init__(self, embedding_dim, hidden_dim):
        super(EdgePredictor, self).__init__()
        # Attribute names kept identical to the original so saved
        # state_dict checkpoints remain loadable.
        self.fc1 = nn.Linear(embedding_dim * 2, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map concatenated embedding pairs to edge probabilities."""
        hidden = self.relu(self.fc1(x))
        return self.sigmoid(self.fc2(hidden))
from utils.util import get_text_data


def preprocess(args):
    """Build train/test splits of concatenated (ood, id) embedding pairs.

    Loads edge index lists and precomputed node embeddings from ``./cora``,
    forms one feature vector per edge by concatenating the OOD-node and
    ID-node embeddings, labels positives 1 and negatives 0, and splits
    80/20 into train/test.

    Args:
        args: namespace providing ``edge_type`` (str, selects which edge
            files to load) and ``seed`` (int, random_state for the split).

    Returns:
        Tuple ``(train_x, train_y, test_x, test_y)`` of float32 tensors;
        the x tensors have shape ``(n, 2 * embedding_dim)``.
    """
    # NOTE(review): the returned text data is unused here; the call is kept
    # in case get_text_data has required side effects — confirm and drop
    # if it does not.
    ood_data, id_data = get_text_data(dataname='cora')

    positive_edges = torch.load(f'./cora/{args.edge_type}_positive_edges.pt')
    negative_edges = torch.load(f'./cora/{args.edge_type}_negative_edges.pt')
    ood_embs = np.load('./cora/cora_ood_embs.npy')
    id_embs = np.load('./cora/cora_id_embs.npy')

    def _pair_features(edge_lists):
        # edge_lists[i] holds the id-node indices adjacent to ood node i;
        # each pair becomes one concatenated feature vector.
        return [
            torch.cat([torch.from_numpy(ood_embs[i]),
                       torch.from_numpy(id_embs[j])])
            for i, neighbors in enumerate(edge_lists)
            for j in neighbors
        ]

    positives = _pair_features(positive_edges)
    negatives = _pair_features(negative_edges)
    data_x = positives + negatives
    data_y = [1] * len(positives) + [0] * len(negatives)

    train_x, test_x, train_y, test_y = train_test_split(
        data_x, data_y, test_size=0.2, random_state=args.seed)
    # Stack the per-edge vectors and convert to FloatTensor.
    train_x = torch.stack(train_x).to(torch.float32)
    train_y = torch.FloatTensor(train_y)
    test_x = torch.stack(test_x).to(torch.float32)
    test_y = torch.FloatTensor(test_y)
    return train_x, train_y, test_x, test_y
# @time_logger
def train(args):
    """Train an EdgePredictor with BCE loss and accuracy-based early stopping.

    Each epoch trains on the full train loader, then evaluates accuracy on
    the test loader. The best-performing model's state_dict is saved to
    ``cora/{args.dataset}_{args.edge_type}.pth``; training stops after
    ``args.early_stop`` epochs without improvement.

    Args:
        args: namespace providing device, embedding_dim, hidden_dim,
            batch_size, lr, epochs, early_stop, dataset, edge_type, seed.
    """
    device = args.device
    model = EdgePredictor(args.embedding_dim, args.hidden_dim).to(device)
    train_x, train_y, test_x, test_y = preprocess(args)
    train_dataset = TensorDataset(train_x, train_y)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    test_dataset = TensorDataset(test_x, test_y)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    best_accuracy = 0
    epochs_no_improve = 0
    num_epochs = args.epochs
    for epoch in range(num_epochs):
        # BUG FIX: restore training mode every epoch. The original called
        # model.eval() after epoch 1 and never switched back, so epochs
        # 2..N trained in eval mode (wrong for dropout/batch-norm layers).
        model.train()
        for i, (node_embedding_pairs, batch_labels) in enumerate(train_loader):
            node_embedding_pairs, batch_labels = node_embedding_pairs.to(device), batch_labels.to(device)
            outputs = model(node_embedding_pairs)
            outputs = outputs.squeeze()
            loss = criterion(outputs, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Evaluate on the held-out split without tracking gradients.
        model.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            for node_embedding_pairs, batch_labels in test_loader:
                node_embedding_pairs, batch_labels = node_embedding_pairs.to(device), batch_labels.to(device)
                outputs = model(node_embedding_pairs)
                # Threshold sigmoid output at 0.5 for the hard prediction.
                predicted = (outputs.squeeze() > 0.5).float()
                total += batch_labels.size(0)
                correct += (predicted == batch_labels).sum().item()
        test_accuracy = 100 * correct / total
        print(f'Epoch {epoch+1}/{num_epochs}, Train Loss: {loss.item()}, Test Accuracy: {test_accuracy}%')
        # Checkpoint on improvement; count stagnant epochs otherwise.
        if test_accuracy > best_accuracy:
            best_accuracy = test_accuracy
            epochs_no_improve = 0
            torch.save(model.state_dict(), 'cora/' + args.dataset + f'_{args.edge_type}.pth')
        else:
            epochs_no_improve += 1
            if epochs_no_improve >= args.early_stop:
                print(f'Early stopping triggered after {epoch + 1} epochs')
                break
if __name__ == '__main__':
    # CLI entry point: declare every hyperparameter as a
    # (flag, type, default) triple and register them in one pass.
    parser = argparse.ArgumentParser()
    cli_options = [
        ('--device', str, 'cuda'),
        ('--dataset', str, 'cora'),
        ('--seed', int, 42),
        ('--embedding_dim', int, 4096),
        ('--hidden_dim', int, 1024),
        ('--batch_size', int, 512),
        ('--lr', float, 0.00005),
        ('--epochs', int, 200),
        ('--early_stop', int, 20),
        ('--edge_type', str, 'ood_id'),
    ]
    for flag, flag_type, flag_default in cli_options:
        parser.add_argument(flag, type=flag_type, default=flag_default)
    args = parser.parse_args()
    train(args)