"""
A function to train a generic model using the matformer library
A JSON config file must be provided
But each argument can be overridden from the CLI
"""
import argparse
import json
import math
import os
from datetime import datetime
from importlib import import_module
from pathlib import Path

import torch
import pytorch_lightning as pl
import wandb
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.profilers import AdvancedProfiler
from pytorch_lightning.strategies import DDPStrategy
from transformers import AutoTokenizer

from matformer.data_module import MatformerDataModule
from matformer.matformer_tokenizers import MatformerTokenizer
from matformer.model_config import ModelConfig
from matformer.models import PL_ModelWrapper

def load_config(path):
    with open(path, 'r') as f:
        return json.load(f)

def apply_overrides(cfg, overrides):
    """Apply dotted-key overrides (e.g. 'training.lr') to the nested config."""
    for key, val in overrides.items():
        keys = key.split('.')
        d = cfg
        for k in keys[:-1]:
            d = d.setdefault(k, {})
        try:
            # Let eval() turn "5", "3e-4", "True", "[1,2]" into Python values
            d[keys[-1]] = eval(val)
        except Exception:
            # Anything that does not eval (e.g. a plain string) is kept as-is
            d[keys[-1]] = val
    return cfg
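
# A quick sketch of how overrides land in the nested config (hypothetical
# values): the key is split on '.', and eval() is attempted on the value.
#   >>> apply_overrides({"training": {}}, {"training.lr": "3e-4"})
#   {'training': {'lr': 0.0003}}
#   >>> apply_overrides({}, {"data.mdat_view": "train"})  # eval fails -> kept as string
#   {'data': {'mdat_view': 'train'}}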

def parse_args():
    parser = argparse.ArgumentParser(description='Train a Matformer model')
    parser.add_argument('--config', type=str, help="Path to a single combined config file")
    parser.add_argument('--model_config', type=str, help="Path to model_config.json")
    parser.add_argument('--training_config', type=str, help="Path to train_config.json")
    parser.add_argument('--data_config', type=str, help="Path to data_config.json")
    parser.add_argument('--tokenizer_config', type=str, help="Path to tokenizer_config.json")
    parser.add_argument('--override', nargs='*', default=[], help="Override config parameters as key=value pairs")
    parser.add_argument('--gpu', type=int, default=1, help="Number of GPU(s)")
    parser.add_argument('--nodes', type=int, default=1, help="Number of node(s)")
    parser.add_argument('--checkpoint', type=str, default=None, help="Path to a checkpoint file")
    parser.add_argument('--start-from-scratch', action='store_true', help="Start training from scratch")
    parser.add_argument('--simulate', action='store_true', help="Instantiate the model, print state_dict shapes, then exit")
    parser.add_argument('--dump-json', type=str, default=None, help="Path to dump state_dict shapes as JSON")
    parser.add_argument('--debug-steps', type=int, default=None, help="Train for at most this many steps (debug mode)")
    parser.add_argument('--compile', action='store_true', help="torch.compile the whole model")
    parser.add_argument('--load-mode', type=str, choices=['full', 'weights_only', 'weights_and_optimizer'],
                        default='full', help="Checkpoint loading strategy")
    parser.add_argument('--precision', type=str,
                        choices=['16-mixed', 'bf16-mixed', '32', '16', 'bf16', '32-true',
                                 'bf16-true', '16-true', '64-true', 'transformer-engine'],
                        default='bf16-mixed', help="Numerical precision passed to the Trainer")
    args = parser.parse_args()
    separate_configs = {
        'model_config': args.model_config,
        'training_config': args.training_config,
        'data_config': args.data_config,
        'tokenizer_config': args.tokenizer_config
    }
    separate_count = sum(1 for v in separate_configs.values() if v is not None)
    # Enforce mutually exclusive usage: --config XOR the four individual files
    if args.config is not None and separate_count > 0:
        parser.error("Cannot specify both --config and individual config files. Choose ONE approach.")
    if args.config is None and separate_count == 0:
        parser.error("Must specify either --config OR all four individual config files "
                     "(--model_config, --training_config, --data_config, --tokenizer_config)")
    if args.config is None and separate_count != 4:
        missing = [k for k, v in separate_configs.items() if v is None]
        parser.error(f"Missing {len(missing)} individual config file(s): {', '.join(missing)}")
    # Build config_paths: a single path (str) or a dict of the four paths
    if args.config is not None:
        config_paths = args.config
    else:
        config_paths = {
            "model_config": args.model_config,
            "training": args.training_config,
            "data": args.data_config,
            "tokenizer": args.tokenizer_config
        }
    overrides = {}
    for item in args.override:
        try:
            k, v = item.split('=', 1)  # Split on the first '=' only
            overrides[k] = v
        except ValueError:
            parser.error(f"Override '{item}' must be in key=value format")
    return (config_paths, overrides, args.gpu, args.checkpoint, args.start_from_scratch,
            args.simulate, args.dump_json, args.debug_steps, args.compile, args.nodes,
            args.load_mode, args.precision)
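
# The four-file form accepted by parse_args above (mutually exclusive with
# --config; filenames are illustrative):
#   python train_model.py --model_config model_config.json --training_config train_config.json \
#       --data_config data_config.json --tokenizer_config tokenizer_config.json --gpu 2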

def get_model_class(model_class: str):
    module = import_module("matformer.transformer_blocks")
    return getattr(module, model_class)
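
# For example, get_model_class("Autoregressive_Model") is equivalent to
# `from matformer.transformer_blocks import Autoregressive_Model` (the class
# name the defaults in load_and_prepare_configs below check against).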

def extract_state_dict_shapes(state_dict):
    """
    Extract a dict mapping param_name -> shape string.
    """
    shapes = {}
    for k, v in state_dict.items():
        shapes[k] = "x".join(str(d) for d in v.shape)
    return shapes
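
# For instance (hypothetical parameter name):
#   >>> extract_state_dict_shapes({"proj.weight": torch.zeros(768, 3072)})
#   {'proj.weight': '768x3072'}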

def save_state_dict_json(state_dict, path):
    shapes = extract_state_dict_shapes(state_dict)
    with open(path, 'w') as f:
        json.dump(shapes, f, indent=2)

def load_and_prepare_configs(config_paths, overrides):
    """
    Load one or more JSON configs, merge them, apply overrides, and derive
    dependent configuration properties (like is_causal).
    """
    if isinstance(config_paths, dict):  # Four separate config files
        model_cfg_dict = load_config(config_paths["model_config"])
        train_cfg_dict = load_config(config_paths["training"])
        data_cfg_dict = load_config(config_paths["data"])
        tok_cfg_dict = load_config(config_paths["tokenizer"])
        cfg = {
            "model_class": model_cfg_dict.pop("model_class", None),
            "save_dir": model_cfg_dict.pop("save_dir", "./checkpoints"),
            "wandb_project": model_cfg_dict.pop("wandb_project", "default_project"),
            "wandb_run_name": model_cfg_dict.pop("wandb_run_name", "default_run"),
            "model_config": model_cfg_dict,
            "training": train_cfg_dict,
            "data": data_cfg_dict,
            "tokenizer": tok_cfg_dict
        }
    else:  # Single combined config file
        json_config = load_config(config_paths)
        cfg = {
            "model_class": json_config['model_class'],
            "save_dir": json_config.pop("save_dir", "./checkpoints"),
            "wandb_project": json_config.pop("wandb_project", "default_project"),
            "wandb_run_name": json_config.pop("wandb_run_name", "default_run"),
            "model_config": json_config['model_config'],
            "training": json_config['training'],
            "data": json_config['data'],
            "tokenizer": json_config['tokenizer'],
            "training_objective": json_config.pop("training_objective", None),
            "is_causal": json_config.pop("is_causal", None)
        }
    cfg = apply_overrides(cfg, overrides)
    model_class = cfg['model_class']
    # cfg['model_config'] is a plain dict, so use .get() rather than getattr()
    if cfg['model_config'].get('training_objective') is None:
        cfg['model_config']['training_objective'] = "autoregressive" if model_class == "Autoregressive_Model" else "masked"
    if cfg['model_config'].get('is_causal') is None:
        cfg['model_config']['is_causal'] = model_class == "Autoregressive_Model"
    cfg['model_config']['tokenizer_type'] = cfg['tokenizer']['type']
    cfg['model_config']['tokenizer_name'] = cfg['tokenizer']['pretrained_name']
    if 'wanted_from_strategy' not in cfg['data']:
        cfg['data']['wanted_from_strategy'] = 'chunked_tokens'
    return cfg['model_config'], cfg['training'], cfg['data'], cfg['tokenizer'], cfg
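
# Sketch of the single-file config layout this function expects (keys taken
# from the code above, values illustrative):
# {
#   "model_class": "Autoregressive_Model",
#   "save_dir": "./checkpoints",
#   "wandb_project": "matformer", "wandb_run_name": "my-run",
#   "model_config": {...}, "training": {...}, "data": {...}, "tokenizer": {...}
# }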

def main():
    (config_paths, overrides, device_count, ckpt_arg, start_scratch, simulate,
     dump_json, debug_steps, _compile, num_nodes, load_mode, precision) = parse_args()
    model_config_dict, train_cfg, data_cfg, tok_cfg, cfg = load_and_prepare_configs(config_paths, overrides)
    model_cfg = ModelConfig(**model_config_dict)
    save_dir = cfg.get('save_dir', './checkpoints')
    pl.seed_everything(train_cfg.get('seed', 27))
    # Detect device
    if torch.cuda.is_available():
        accelerator = 'gpu'
        device_string = 'cuda'
    elif torch.backends.mps.is_available():
        accelerator = device_string = 'mps'
    else:
        accelerator = device_string = 'cpu'
    # Create the data module backed by an MDAT dataset
    data = MatformerDataModule(
        mdat_path=data_cfg['data_root'],
        iteration_modality=data_cfg['wanted_from_strategy'],
        pad_token_id=model_cfg.pad_token_id,
        varlen_strategy=tok_cfg['varlen_strategy'],
        mdat_strategy=data_cfg['mdat_strategy'],
        mdat_view=data_cfg['mdat_view'],
        with_meta=False,
        max_seq_len=model_cfg.max_position_embeddings,
        batch_size=data_cfg['batch_size'],
        num_devices=device_count
    )
    data.setup()
    # Compute the total number of optimizer steps if the dataset length is available.
    # With multiple GPUs, the length is already divided by the number of GPUs (e.g. /4).
    max_epochs = train_cfg.get("max_epochs", 1)
    if hasattr(data, '__len__') and len(data) > 0:
        num_batches = math.ceil(len(data) / data_cfg["batch_size"])
        accumulate_grad_batches = train_cfg.get("accumulate_grad_batches", 1)
        # Also divide by the number of nodes
        total_steps = (num_batches // accumulate_grad_batches) * max_epochs // num_nodes
        train_cfg["total_steps"] = total_steps
        train_cfg["num_batches"] = num_batches
    else:
        print("The DataModule does not report its length, so LR scheduling is disabled")
        train_cfg["lr_scheduling"] = False
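    # Worked example with illustrative numbers: len(data)=10_000, batch_size=8,
    # accumulate_grad_batches=4, max_epochs=1, num_nodes=1 gives
    # num_batches = ceil(10_000/8) = 1250 and total_steps = (1250//4)*1//1 = 312.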
    # Initialize the model
    ModelClass = get_model_class(cfg['model_class'])
    model = PL_ModelWrapper(
        ModelClass,
        config=model_cfg,
        tokenizer=None,
        train_config=train_cfg,
        device=device_string,
        batch_size=data_cfg['batch_size'],
        load_mode=load_mode
    )
    if simulate:
        print("=== SIMULATION MODE ===")
        print("state_dict parameter names and shapes:")
        shapes = extract_state_dict_shapes(model.parameters_state_dict())
        for k, v in shapes.items():
            print(f"{k}: {v}")
        if dump_json:
            save_state_dict_json(model.parameters_state_dict(), dump_json)
            print(f"State dict shapes saved to {dump_json}")
        return
    # Handle checkpoint loading
    ckpt_path = None
    if not start_scratch:
        if ckpt_arg and os.path.exists(ckpt_arg):
            ckpt_path = ckpt_arg
        else:
            last_ckpt = Path(save_dir) / "last.ckpt"
            if last_ckpt.exists():
                print(f"Resuming training from {last_ckpt}")
                ckpt_path = str(last_ckpt)
            else:
                print("No checkpoint found, starting from scratch.")
    ### NOTE: the checkpoint-naming logic below is UNRELIABLE! ###
    # Create a timestamped checkpoint filename to avoid name clashes
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    checkpoint_name = train_cfg.get('checkpoint_name', 'model')
    run_name = cfg.get('wandb_run_name', 'training-run')
    if not debug_steps:
        checkpoint_name = f"{checkpoint_name}_{timestamp}"
    else:
        checkpoint_name = f"{checkpoint_name}_DEBUG_{debug_steps}_{timestamp}"
    # Set up logging
    wandb_logger = WandbLogger(
        name=f"{run_name}_{timestamp}",
        project=cfg.get('wandb_project', 'matformer'),
        config=cfg
    )
    checkpoint = ModelCheckpoint(
        dirpath=save_dir,
        filename=checkpoint_name,
        save_top_k=1,
        save_last=True,
        every_n_train_steps=train_cfg.get("save_every_n_steps", None),
        enable_version_counter=True,
        save_on_train_epoch_end=True
    )
    torch.set_float32_matmul_precision('high')
    if debug_steps is not None:
        max_epochs = None
        max_steps = debug_steps
    else:
        max_steps = -1
    # Create the trainer
    strategy = DDPStrategy(gradient_as_bucket_view=False, static_graph=False, find_unused_parameters=False)
    trainer = pl.Trainer(
        logger=wandb_logger,
        callbacks=[checkpoint],
        precision=precision,
        gradient_clip_val=train_cfg.get('gradient_clip_val', 1),
        accelerator=accelerator,
        devices=device_count,
        log_every_n_steps=10,
        accumulate_grad_batches=train_cfg.get('accumulate_grad_batches', 1),
        default_root_dir=save_dir,
        max_epochs=max_epochs,
        max_steps=max_steps,
        strategy=strategy,
        num_nodes=num_nodes
    )
    if _compile:
        try:
            if tok_cfg['varlen_strategy']:
                # Variable sequence lengths: compile with dynamic shapes
                print("Trying compilation: dynamic sequence length")
                model = torch.compile(model, dynamic=True)
            else:
                # Fixed sequence length: try max-autotune first, then plain compile
                print("Trying compilation: fixed sequence length, max-autotune")
                try:
                    model = torch.compile(model, mode="max-autotune")
                except Exception:
                    print("Trying compilation: fixed sequence length, normal autotune")
                    model = torch.compile(model)
        except Exception:
            print("Compilation failed! Running the non-compiled model")
    trainer.fit(model, data, ckpt_path=ckpt_path)

if __name__ == '__main__':
    main()