-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
182 lines (147 loc) · 4.62 KB
/
main.py
File metadata and controls
182 lines (147 loc) · 4.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
import sys
import dill
import os
import pandas as pd
from subprocess import check_output
import multiprocess as mp
import time
from models.model_utils.evaluator import gen_eval, print_eval
from utils.src_to_test_train import main as src_tt
from models.model3 import model as model3
from models.model4 import model as model4
from models.model5 import model as model5
from models.model6 import model as model6
from models.model7 import model as model7
from models.model8 import model as model8
from models.model9 import model as model9
from models.model10 import model as model10
from models.model11 import model as model11
from models.model13 import model as model13
from models.model14 import model as model14
def get_predictions(model, k_fold, pred_path):
    """Return predictions for one model/fold, preferring the on-disk cache.

    If a prediction CSV already exists at pred_path it is loaded and
    returned; otherwise the model is run against that fold's well data.
    """
    if not os.path.exists(pred_path):
        # no cached predictions — run the model on the held-out fold
        return model.main(f'./well_data/k{k_fold}.csv', k_fold)
    print(f'{model.get_name()}-k{k_fold} prediction file found')
    return pd.read_csv(pred_path)
def get_eval(predictions, m_name, k_fold, eval_path):
    """Return a one-row evaluation DataFrame for one model/fold.

    If an evaluation CSV already exists at eval_path it is loaded and
    returned; otherwise metrics are computed from the predictions via
    gen_eval and packed into a single-row DataFrame.
    """
    if os.path.exists(eval_path):
        print(f'{m_name}-k{k_fold} evaluation file found')
        return pd.read_csv(eval_path)
    # renamed from `eval`, which shadowed the Python builtin
    metrics = gen_eval(predictions)
    return pd.DataFrame(data={
        'model': m_name,
        'k': k_fold,
        'sensitivity': metrics['sensitivity'],
        'f1': metrics['f1'],
        'accuracy': metrics['accuracy'],
        'precision': metrics['precision'],
        'specificity': metrics['specificity'],
    }, index=[0])
def build_model(m, k_fold):
    """Ensure the node-side model for fold k_fold of model m is built.

    Skips the build when ./models/{m}/model/k{k_fold} already exists;
    otherwise delegates to build_ia_model.
    """
    # makedirs(exist_ok=True) replaces the exists-then-mkdir pair: it is
    # race-free (this runs in parallel worker processes) and also creates
    # missing parent directories.
    os.makedirs(f'./models/{m}/model/', exist_ok=True)
    # create k fold model
    if os.path.exists(f'./models/{m}/model/k{k_fold}'):
        print(f'{m} k{k_fold} model built')
    else:
        print(f'{m} k{k_fold} not built, building…')
        build_ia_model(m, k_fold)
def build_ia_model(m, k_fold):
    """Build the aggregate data files for model m, trained on every fold
    except k_fold, by shelling out to the node preprocessing CLI.

    Returns the raw bytes output of the CLI invocation.
    """
    print(f'building {m} k{k_fold}')
    # train on the four folds that are NOT the held-out fold
    train_folds = [f for f in (1, 2, 3, 4, 5) if f != k_fold]
    out_dir = f'./models/{m}/model/k{k_fold}/'
    os.mkdir(out_dir)
    cmd = [
        'node',
        'node_modules/preprocessing/preprocessing/cli/produce-aggregate-data-files.js',
        '-m', f'{m}',
        '-o', out_dir,
        '-p',
    ]
    cmd.extend(f'./well_data/k{f}.csv' for f in train_folds)
    cmd.append('./node_modules/preprocessing/data/mouza-names.csv')
    return check_output(cmd)
def run_model(model, k_fold):
    """Run one model on one fold and persist its predictions and metrics.

    Predictions go to ./prediction_data/ and the evaluation row to
    ./evaluation_data/, both keyed by model name and fold number.
    """
    name = model.get_name()
    print(f'running {name} k{k_fold}')
    pred_path = f'./prediction_data/{name}-k{k_fold}.csv'
    eval_path = f'./evaluation_data/{name}-k{k_fold}.csv'
    predictions = get_predictions(model, k_fold, pred_path)
    evaluation = get_eval(predictions, name, k_fold, eval_path)
    predictions.to_csv(pred_path, index=False)
    evaluation.to_csv(eval_path, index=False)
    print(f'predictions written to {pred_path}')
    print(f'evaluation written to {eval_path}')
def extract_ia_data():
    """Ensure the iarsenic source data has been extracted.

    Returns a ready message if ./well_data/src_data.json already exists,
    otherwise runs `npm run load-src-data` and returns its decoded output.
    """
    if os.path.exists('./well_data/src_data.json'):
        return 'src data ready'
    print('preparing src data…')
    return check_output(['npm', 'run', 'load-src-data']).decode()
def gen_test_train():
    """Ensure the five k-fold CSV splits exist in ./well_data/.

    If any fold file k1..k5 is missing, every CSV in ./well_data/ is
    removed and the splits are regenerated from scratch.
    """
    needs_rebuild = False
    for fold in (1, 2, 3, 4, 5):
        if not os.path.exists(f'./well_data/k{fold}.csv'):
            needs_rebuild = True
            print(f'k{fold} not found, generating k folds…')
            break
        print(f'k{fold} split already exists')
    if needs_rebuild:
        # clear out stale CSVs so the regenerated splits start clean
        for name in os.listdir('./well_data/'):
            if name.endswith('csv'):
                os.remove(os.path.join('./well_data/', name))
        src_tt()
    return
def unzip_geodata():
    """Ensure the geodata directory has been unzipped.

    Returns a ready message if ./geodata/ already exists, otherwise runs
    `npm run unzip-geodata` and returns its decoded output.
    """
    if os.path.exists('./geodata/'):
        return 'geodata ready'
    print('unzipping geodata…')
    return check_output(['npm', 'run', 'unzip-geodata']).decode()
if __name__ == '__main__':
    # Pipeline entry point: prepare data, build the node-backed (iarsenic)
    # models in parallel, then run every model on all five CV folds.
    print('\n______unzipping geodata______\n')
    print(unzip_geodata())
    print('\n______extracting data from iarsenic______\n')
    print(extract_ia_data())
    print('\n______create test train split______\n')
    print(gen_test_train())
    print('\n______building ia models______\n')
    # Only models 3-5 need the node CLI build step here; the remaining
    # models are run directly below without a build phase.
    ia_models = ['model3', 'model4', 'model5']
    bj = [] # BuildJobs
    for m in ia_models:
        for x in [1, 2, 3, 4, 5]:
            # one worker process per (model, fold) pair
            p = mp.Process(target=build_model, args=(m, x,))
            p.start()
            bj.append(p)
            time.sleep(0.05) # pause so logs come out in order
    # wait for every build to finish before any model is run
    for j in bj:
        j.join()
    print('\n______running models______\n')
    models = [model3, model4, model5, model6, model7, model8, model9, model10, model11, model13, model14]
    rj = [] # RunJobs
    for m in models:
        for x in [1, 2, 3, 4, 5]:
            # fan out: one process per (model, fold) pair
            p = mp.Process(target=run_model, args=(m, x,))
            p.start()
            rj.append(p)
    # block until all runs complete
    for j in rj:
        j.join()