arguments.py
from datamodule import Dataset, DataModule
from model.neuralnetwork import FFN
from model.transformer import TransformerEncoder
import pdb


def load_args(parser, mode: str, model: str):
    """Load the arguments for the respective mode and model."""
    # Dispatch to the matching args_<model> function defined in this module,
    # e.g. model="nn" resolves to args_nn(parser).
    globals()[f"args_{model}"](parser)

def args_nn(parser):
    """CLI arguments for the FFN model."""
    # EarlyStopping
    group = parser.add_argument_group("Early Stopping Configuration")
    # group.add_argument("--monitor", type=str, default="loss/val_loss")
    # group.add_argument("--es_mode", type=str, default="min")
    group.add_argument("--patience", type=int, default=5,  # multiply with check_val_every
                       help="number of bad validation epochs before stop, depends "
                            "on 'check_val_every' parameter as well")
    # ModelCheckpoint
    # group = parser.add_argument_group("Model Checkpoint Configuration")
    # group.add_argument("--monitor", type=str, default="loss/val_loss")
    # group.add_argument("--save_top_k", type=int, default=1)
    # group.add_argument("--check_mode", type=str, default="min")
    # dm
    # already implemented in model method: group = parser.add_argument_group("Data Module Configuration")
    group = DataModule.add_model_specific_args(parser)  # add additional arguments directly in class method
    # model
    # already implemented in model method: group = parser.add_argument_group("Model Configuration")
    group = FFN.add_model_specific_args(parser)  # add additional arguments directly in class
    # trainer
    group = parser.add_argument_group("Training Configuration")
    group.add_argument("--max_epochs", type=int, default=1000)
    group.add_argument("--check_val_every", type=int, default=10)
    # group.add_argument("--cpus_per_trial", type=int, default=8)

def args_lin(parser):
    """CLI arguments for the linear model."""
    # Dataset args
    group = Dataset.add_model_specific_args(parser)
    # group = parser_train.add_argument_group("Model Configuration")
    group.add_argument("--loss", type=str, choices=["log_loss", "hinge"],
                       required=True)
    group.add_argument("--pca", action="store_true")
    group = parser.add_argument_group("Sklearn Tune Configuration")
    group.add_argument("--max_iters", type=int, default=300)
    # group.add_argument("--njobs", type=int, default=2)  # how many trials in parallel

def args_svm(parser):
    """CLI arguments for the SVM model."""
    # Dataset args
    group = Dataset.add_model_specific_args(parser)
    # group = parser_train.add_argument_group("Model Configuration")
    # group.add_argument("--loss", type=str, choices=["hinge", "log_loss"],
    #                    default="log_loss")
    group.add_argument("--pca", action="store_true")
    group = parser.add_argument_group("Sklearn Tune Configuration")
    group.add_argument("--max_iters", type=int, default=300)
    # group.add_argument("--njobs", type=int, default=2)  # how many trials in parallel

def args_rf(parser):
    """CLI arguments for the random forest model."""
    # Dataset args
    group = Dataset.add_model_specific_args(parser)
    # group = parser_train.add_argument_group("Model Configuration")
    # group.add_argument("--loss", type=str, choices=["hinge", "log_loss"],
    #                    default="log_loss")
    group.add_argument("--pca", action="store_true")
    group = parser.add_argument_group("Sklearn Tune Configuration")
    group.add_argument("--n_estimators", type=int, default=100)
    # group.add_argument("--njobs", type=int, default=2)  # how many trials in parallel

def args_xgb(parser):
    """CLI arguments for the XGBoost model."""
    # Dataset args
    group = Dataset.add_model_specific_args(parser)
    # EarlyStopping
    group = parser.add_argument_group("Early Stopping Configuration")
    # group.add_argument("--monitor", type=str, default="loss/val_loss")
    # group.add_argument("--es_mode", type=str, default="min")
    group.add_argument("--patience", type=int, default=32,
                       help="number of bad epochs before stop")
    group = parser.add_argument_group("Training Configuration")
    group.add_argument("--num_boost_round", type=int, default=1000)
    # group.add_argument("--cpus_per_trial", type=int, default=8)

def args_transformer(parser):
    """CLI arguments for the TransformerEncoder model."""
    # EarlyStopping
    group = parser.add_argument_group("Early Stopping Configuration")
    # group.add_argument("--monitor", type=str, default="loss/val_loss")
    # group.add_argument("--es_mode", type=str, default="min")
    group.add_argument("--patience", type=int, default=5,  # multiply with check_val_every
                       help="number of bad validation epochs before stop, depends "
                            "on 'check_val_every' parameter as well")
    # ModelCheckpoint
    # group = parser.add_argument_group("Model Checkpoint Configuration")
    # group.add_argument("--monitor", type=str, default="loss/val_loss")
    # group.add_argument("--save_top_k", type=int, default=1)
    # group.add_argument("--check_mode", type=str, default="min")
    # dm
    # already implemented in model method: group = parser.add_argument_group("Data Module Configuration")
    group = DataModule.add_model_specific_args(parser)  # add additional arguments directly in class method
    # model
    # already implemented in model method: group = parser.add_argument_group("Model Configuration")
    group = TransformerEncoder.add_model_specific_args(parser)  # add additional arguments directly in class
    # trainer
    group = parser.add_argument_group("Training Configuration")
    group.add_argument("--max_epochs", type=int, default=1000)
    group.add_argument("--check_val_every", type=int, default=10)
    # group.add_argument("--cpus_per_trial", type=int, default=8)