forked from visipedia/newt
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathevaluate_linear_models.py
More file actions
155 lines (107 loc) · 5.4 KB
/
evaluate_linear_models.py
File metadata and controls
155 lines (107 loc) · 5.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
import argparse
from functools import partial
import os
import time
import numpy as np
import pandas as pd
import tqdm
import configs
import linear_evaluation
def evalute_features(features_df, evaluator_fn):
    """Run ``evaluator_fn`` on every dataset row of ``features_df``.

    Each row of ``features_df`` is expected to hold the columns ``name``,
    ``X_train``, ``X_test``, ``y_train`` and ``y_test`` (one row per dataset;
    presumably produced by the feature-extraction step — confirm against the
    pickle writer).

    Args:
        features_df: pandas DataFrame with one row per dataset, as above.
        evaluator_fn: callable ``(X_train, y_train, X_test, y_test) -> dict``
            returning a metrics dict for one dataset.

    Returns:
        list of dicts, one per dataset, each the evaluator's metrics dict
        augmented with a ``'name'`` key identifying the dataset.
    """
    task_results = []
    # Pass total= so tqdm can render percentage/ETA; a bare iterrows()
    # iterator has no length and yields only an item counter.
    pbar = tqdm.tqdm(features_df.iterrows(), total=len(features_df))
    for _, row in pbar:
        dataset_name = row['name']
        pbar.set_description("Evaluating %s" % dataset_name)
        results = evaluator_fn(row['X_train'], row['y_train'],
                               row['X_test'], row['y_test'])
        results['name'] = dataset_name
        task_results.append(results)
    pbar.close()
    return task_results
def analyze_features(feature_dir, results_dir, evaluator_fn, overwrite=False):
    """Evaluate saved features for every model listed in ``configs.model_specs``.

    For each model, loads ``<feature_dir>/<model_name>.pkl`` (a pickled
    DataFrame of per-dataset features), runs ``evaluator_fn`` over it, and
    writes the results DataFrame to ``<results_dir>/<model_name>.pkl``.
    Models with existing results (unless ``overwrite``) or missing feature
    files are skipped with a message.

    Args:
        feature_dir: directory containing ``<model_name>.pkl`` feature files.
        results_dir: directory where result pickles are written.
        evaluator_fn: callable passed through to ``evalute_features``.
        overwrite: when True, re-run evaluation even if results exist.
    """
    for spec in configs.model_specs:
        model_name = spec['name']
        print("Evaluating features from %s" % model_name)
        start = time.time()

        results_fp = os.path.join(results_dir, model_name + ".pkl")
        feature_fp = os.path.join(feature_dir, model_name + ".pkl")

        # Skip models that were already evaluated, unless asked to redo them.
        if not overwrite and os.path.exists(results_fp):
            print("Found existing results file for model %s at %s" % (model_name, results_fp))
            continue

        # Skip models whose features were never extracted.
        if not os.path.exists(feature_fp):
            print("WARNING: did not find features for model %s at location %s" % (model_name, feature_fp))
            continue

        # Load features, evaluate, and persist the per-dataset results.
        feature_df = pd.read_pickle(feature_fp)
        results_df = pd.DataFrame(evalute_features(feature_df, evaluator_fn))
        results_df['model_name'] = model_name
        results_df.to_pickle(results_fp)

        # Report wall-clock time for this model as HH:MM:SS.
        elapsed = time.time() - start
        hours, remainder = divmod(elapsed, 3600)
        minutes, seconds = divmod(remainder, 60)
        print('Evaluation Time: {:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds)))
        print()
def parse_args():
    """Parse the command-line options for the linear-model evaluation script.

    Returns:
        argparse.Namespace with feature_dir, result_dir, model, overwrite,
        max_iter, standardize, normalize, grid_search and dual attributes.
    """
    parser = argparse.ArgumentParser(description='Train and evaluate linear models.')

    # Required I/O locations and model choice.
    parser.add_argument('--feature_dir', type=str, required=True,
                        help='Path to the directory containing extracted features.')
    parser.add_argument('--result_dir', type=str, required=True,
                        help='Path to the directory to store results.')
    parser.add_argument('--model', type=str, required=True,
                        choices=['logreg', 'sgd', 'linearsvc'],
                        help='Model type')

    # Optional behavior flags (all default to off / argparse defaults).
    parser.add_argument('--overwrite', action='store_true',
                        help='Overwrite existing saved features.')
    parser.add_argument('--max_iter', type=int, default=100,
                        help='Maximum number of iterations taken for the solvers to converge.')
    parser.add_argument('--standardize', action='store_true',
                        help='Standardize features by removing the mean and scaling to unit variance.')
    parser.add_argument('--normalize', action='store_true',
                        help='Scale feature vectors individually to unit norm.')
    parser.add_argument('--grid_search', action='store_true',
                        help='Search for optimal regularization terms.')
    parser.add_argument('--dual', action='store_true',
                        help='Use the dual formulation of the SVM (only relevant for `model = linearsvc`)')

    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()

    # Make sure there is somewhere to write the result pickles.
    if not os.path.exists(args.result_dir):
        print("Creating %s to store results" % args.result_dir)
        os.makedirs(args.result_dir)

    # Keyword arguments shared by every evaluator; `dual` applies only to
    # the linear SVM.
    shared_kwargs = dict(max_iter=args.max_iter,
                         grid_search=args.grid_search,
                         standardize=args.standardize,
                         normalize=args.normalize)
    if args.model == 'logreg':
        evaluator_fn = partial(linear_evaluation.logreg, **shared_kwargs)
    elif args.model == 'sgd':
        evaluator_fn = partial(linear_evaluation.sgd, **shared_kwargs)
    elif args.model == 'linearsvc':
        evaluator_fn = partial(linear_evaluation.linearsvc, dual=args.dual, **shared_kwargs)
    else:
        # Unreachable in practice: argparse restricts --model via `choices`.
        raise ValueError("Unknown model type")

    # Run the full evaluation and report total wall-clock time as HH:MM:SS.
    start = time.time()
    analyze_features(args.feature_dir, args.result_dir, evaluator_fn, args.overwrite)
    elapsed = time.time() - start
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    print('Total Evaluation Time: {:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds)))
    print()