-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathtrain.py
More file actions
139 lines (115 loc) · 4.72 KB
/
train.py
File metadata and controls
139 lines (115 loc) · 4.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
"""train.py: Train a model.
Usage:
- Single training run, from interactive session: python train.py -config config-base.yaml
- Single training run, on slurm: sbatch train.sb config-base.yaml
- Hyperparameter sweep, on slurm: see README.md
"""
import callbacks
import dataset
import models
import lr_schedules
import os
import utils
import wandb
# Workaround to improve connection reliability with wandb; see:
# https://github.com/wandb/wandb/issues/3326#issuecomment-1065328606
os.environ["WANDB_START_METHOD"] = "thread"
def _resolve_wandb_dir(config):
    """Return the directory wandb should write its run files to.

    Uses ``config['dir']`` when it is set and is not the literal string
    'none' (case-insensitive). Relative paths are made absolute and the
    directory is created if it does not exist. Otherwise falls back to
    the current working directory.
    """
    wandb_dir = config.get('dir')
    if not wandb_dir or wandb_dir.lower() == 'none':
        return os.getcwd()
    if not os.path.isabs(wandb_dir):
        wandb_dir = os.path.abspath(wandb_dir)
    os.makedirs(wandb_dir, exist_ok=True)
    return wandb_dir


def _resolve_wandb_name(config):
    """Return ``config['name']`` if set and not 'none' (case-insensitive),
    else None so wandb generates a random run name."""
    name = config.get('name')
    return name if name and name.lower() != 'none' else None


def train(args):
    """Run one training job: init wandb, build datasets and model, fit.

    Args:
        args: argparse.Namespace with attributes
            ``config`` (path to a YAML config file),
            ``wandb_mode`` (forwarded to ``wandb.init``; may be None), and
            ``debugtrain`` (limit training to 1 step per epoch).
    """
    # Start `wandb`
    config, project = utils.get_config(args.config)
    wandb_dir = _resolve_wandb_dir(config)
    wandb_name = _resolve_wandb_name(config)
    # Create a unique run ID that includes the name when one was given.
    wandb_id = utils.generate_unique_id(wandb_name)
    wandb.init(
        config=config,
        project=project,
        dir=wandb_dir,
        name=wandb_name,
        id=wandb_id,
        mode=args.wandb_mode)
    utils.validate_config(wandb.config)
    # Get datasets. The training stream is endless; the validation stream
    # is endless only when exact validation metrics are not requested.
    train_data = dataset.SequenceTfDataset(
        wandb.config.train_data_paths, wandb.config.train_targets,
        targets_are_classes=wandb.config.targets_are_classes, endless=True,
        batch_size=wandb.config.batch_size,
        reverse_complement=wandb.config.use_reverse_complement)
    val_data = dataset.SequenceTfDataset(
        wandb.config.val_data_paths, wandb.config.val_targets,
        targets_are_classes=wandb.config.targets_are_classes,
        endless=not wandb.config.use_exact_val_metrics,
        batch_size=wandb.config.batch_size,
        reverse_complement=wandb.config.use_reverse_complement)
    utils.validate_datasets([train_data, val_data])
    # Get training details
    steps_per_epoch_train, steps_per_epoch_val = utils.get_step_size(
        wandb.config, train_data, val_data)
    class_weight = utils.get_class_weight(wandb.config, train_data)
    # Get model
    lr_schedule = lr_schedules.get_lr_schedule(steps_per_epoch_train, wandb.config)
    model = models.get_model(
        train_data.seq_shape, train_data.num_classes,
        train_data.class_to_idx_mapping, lr_schedule, wandb.config)
    # Train
    callback_fns = callbacks.get_training_callbacks(
        wandb.config, model, steps_per_epoch_train)
    if args.debugtrain:
        # Limit training to 1 step per epoch for debugging.
        # NOTE(review): the callbacks and LR schedule above were built with
        # the full steps_per_epoch_train; only model.fit sees this override.
        # Confirm that mismatch is intended for debug runs.
        steps_per_epoch_train = 1
    model.fit(
        train_data.dataset,
        epochs=wandb.config.num_epochs,
        steps_per_epoch=steps_per_epoch_train,
        validation_data=val_data.dataset,
        validation_steps=steps_per_epoch_val,
        callbacks=callback_fns,
        class_weight=class_weight)
    # CLR tail: finish a cyclic schedule with a short linear-decay phase.
    if wandb.config.lr_schedule == 'cyclic':
        finetune_clr_tail(steps_per_epoch_train, steps_per_epoch_val,
                          class_weight, train_data, val_data, model,
                          wandb.config)
def finetune_clr_tail(steps_per_epoch_train, steps_per_epoch_val, class_weight,
                      train_data, val_data, model, config):
    """Train with a linear LR decay at the end of a one-cycle LR schedule.

    E.g. the final linear segment illustrated at
    https://raw.githubusercontent.com/titu1994/keras-one-cycle/master/images/one_cycle_lr.png

    Args:
        steps_per_epoch_train: training steps per epoch.
        steps_per_epoch_val: validation steps per epoch.
        class_weight: class-weight mapping forwarded to ``model.fit``.
        train_data: training dataset wrapper (provides ``.dataset``).
        val_data: validation dataset wrapper (provides ``.dataset``).
        model: already-trained model; recompiled here with the tail optimizer.
        config: run config (wandb.config-like; supports attribute and
            item access and conversion via ``dict(...)``).
    """
    # Decay linearly from lr_init down to lr_init / 10 over the tail epochs.
    lr_schedule = lr_schedules.get_linear_lr_schedule(
        steps_per_epoch_train, config.clr_tail_epochs, config.lr_init,
        config.lr_init / 10)
    tail_config = dict(config)
    if config.momentum_schedule == 'cyclic':
        # BUGFIX: dict(config) is a shallow copy, so the original code's
        # write through tail_config['optimizer_args'] mutated the nested
        # optimizer_args dict shared with the live config. Copy the nested
        # dict before overriding momentum.
        tail_config['optimizer_args'] = dict(tail_config['optimizer_args'])
        tail_config['optimizer_args']['momentum'] = config.momentum_max
    optimizer = models.get_optimizer(lr_schedule, tail_config)
    metrics = models.get_metrics(
        train_data.num_classes, train_data.class_to_idx_mapping, config)
    model.compile(optimizer=optimizer, loss=model.loss, metrics=metrics)
    # Momentum cycling is disabled for the tail: momentum stays fixed.
    callback_fns = callbacks.get_training_callbacks(
        config, model, steps_per_epoch_train, disable_momentum=True)
    model.fit(
        train_data.dataset,
        epochs=config.clr_tail_epochs,
        steps_per_epoch=steps_per_epoch_train,
        validation_data=val_data.dataset,
        validation_steps=steps_per_epoch_val,
        callbacks=callback_fns,
        class_weight=class_weight)
def get_args():
    """Parse command-line arguments for a training run.

    Unknown arguments are tolerated (via parse_known_args) so that sweep
    hyperparameters can be passed through on the command line untouched.
    """
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-config', type=str, required=True)
    arg_parser.add_argument('-wandb-mode', type=str)
    arg_parser.add_argument('-debugtrain', action='store_true')
    parsed, _unknown = arg_parser.parse_known_args()
    return parsed
if __name__ == '__main__':
    # Script entry point: parse CLI args and run a single training job.
    train(get_args())