-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtask.py
More file actions
103 lines (79 loc) · 3.06 KB
/
task.py
File metadata and controls
103 lines (79 loc) · 3.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
from parse_args import args
from sklearn.preprocessing import normalize # 引入 normalize
import json
import numpy as np
from sklearn.cluster import KMeans
from sklearn.model_selection import KFold
from sklearn import linear_model
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
from sklearn.metrics import normalized_mutual_info_score
from sklearn.metrics import adjusted_rand_score
# Ground-truth evaluation data: per-region crime and check-in counts.
crime_counts = np.load(args.data_path + args.crime_counts, allow_pickle=True)
check_counts = np.load(args.data_path + args.check_counts, allow_pickle=True)

# Community-district assignment per region (JSON: region index as string -> label).
with open(args.data_path + args.mh_cd, 'r') as f:
    mh_cd = json.load(f)

# Label vector for the 180 regions: entry i is region i's community district.
mh_cd_labels = np.array([mh_cd[str(i)] for i in range(180)], dtype=float)
def regression(X_train, y_train, X_test, alpha):
    """Fit a ridge regressor on the training split and predict the test split.

    Args:
        X_train, y_train: training features and targets.
        X_test: features to predict for.
        alpha: ridge (L2) regularization strength.

    Returns:
        Predicted targets for X_test.
    """
    model = linear_model.Ridge(alpha=alpha)
    model.fit(X_train, y_train)
    return model.predict(X_test)
def kf_predict(X, Y):
    """Cross-validated prediction: 5-fold ridge regression with alpha=1.

    Args:
        X: feature matrix (indexable by row).
        Y: target array aligned with X.

    Returns:
        Tuple (predictions, ground_truths), each concatenated over the
        five test folds in fold order.
    """
    preds, truths = [], []
    for train_idx, test_idx in KFold(n_splits=5).split(X):
        fold_pred = regression(X[train_idx], Y[train_idx], X[test_idx], 1)
        preds.append(fold_pred)
        truths.append(Y[test_idx])
    return np.concatenate(preds), np.concatenate(truths)
def compute_metrics(y_pred, y_test):
    """Score predictions against ground truth.

    Negative predictions are clipped to zero before scoring (the targets
    are counts, which cannot be negative).

    Args:
        y_pred: predicted values.
        y_test: ground-truth values, same shape as y_pred.

    Returns:
        Tuple (MAE, RMSE, R2).
    """
    # np.clip returns a new array, so the caller's y_pred is left untouched.
    # (The original mutated the argument in place via y_pred[y_pred < 0] = 0,
    # a surprising side effect for callers that reuse the array.)
    y_pred = np.clip(y_pred, 0, None)
    mae = mean_absolute_error(y_test, y_pred)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    r2 = r2_score(y_test, y_pred)
    return mae, rmse, r2
def predict_crime(emb):
    """Evaluate embeddings on crime-count prediction (5-fold ridge CV).

    Returns:
        Tuple (MAE, RMSE, R2).
    """
    predictions, truths = kf_predict(emb, crime_counts)
    return compute_metrics(predictions, truths)
def predict_check(emb):
    """Evaluate embeddings on check-in-count prediction (5-fold ridge CV).

    Returns:
        Tuple (MAE, RMSE, R2).
    """
    predictions, truths = kf_predict(emb, check_counts)
    return compute_metrics(predictions, truths)
def classify(emb, rand, n_clusters=12):
    """Cluster region embeddings and score against community-district labels.

    Rows of the embedding matrix are L2-normalized before k-means so that
    clustering depends on direction (semantics) rather than vector magnitude,
    which can be dominated by traffic-volume effects.

    Args:
        emb: embedding matrix, one row per region.
        rand: random seed passed to KMeans.
        n_clusters: number of clusters (default 12, the original hard-coded
            value, kept for backward compatibility).

    Returns:
        Tuple (NMI, ARI) against the ground-truth district labels.
    """
    emb_norm = normalize(emb, norm='l2', axis=1)
    kmeans = KMeans(n_clusters=n_clusters, random_state=rand)
    emb_labels = kmeans.fit_predict(emb_norm)
    nmi = normalized_mutual_info_score(mh_cd_labels, emb_labels)
    ari = adjusted_rand_score(mh_cd_labels, emb_labels)
    return nmi, ari
def clustering(emb):
    """Run the clustering evaluation with a fixed seed.

    Returns:
        Tuple (NMI, ARI).
    """
    return classify(emb, 3)
if __name__ == '__main__':
    # Load the embeddings to evaluate and run all three downstream tasks.
    emb = np.load('emb.npy', allow_pickle=True)

    nmi, ari = clustering(emb)
    print(nmi, ari)

    mae, rmse, r2 = predict_crime(emb)
    print("MAE: %.3f" % mae)
    print("RMSE: %.3f" % rmse)
    print("R2: %.3f" % r2)

    print('>>>>>>>>>>>>>>>>> check')
    mae, rmse, r2 = predict_check(emb)
    print("MAE: %.3f" % mae)
    print("RMSE: %.3f" % rmse)
    print("R2: %.3f" % r2)

    print('>>>>>>>>>>>>>>>>> clustering')