-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_bt_vae.py
More file actions
611 lines (493 loc) · 23.9 KB
/
train_bt_vae.py
File metadata and controls
611 lines (493 loc) · 23.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
import argparse
import datetime
import json
import os
import random

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from scipy import stats
from sklearn.metrics import accuracy_score, precision_recall_curve, auc, roc_curve, roc_auc_score
from torch.utils.data import DataLoader, TensorDataset, Dataset
from tqdm import tqdm
def set_seed(seed=1000):
    """Make runs reproducible by seeding every RNG this script touches.

    Args:
        seed: Random seed value applied to python, numpy, torch and CUDA.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernel selection (slower, but repeatable).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    print(f"Random seed set to {seed} for reproducibility")
class BradleyTerryDataset(Dataset):
    """Paired preference dataset for Bradley-Terry training.

    The positive and negative feature arrays are aligned positionally:
    element i of each array forms one (chosen, rejected) comparison pair.
    """

    def __init__(self, positive_features, negative_features, num_pairs=None):
        # Pairing is by shared index, so the two arrays must line up 1:1.
        assert len(positive_features) == len(negative_features), "Positive and negative counts must match"
        self.positive_features = positive_features
        self.negative_features = negative_features
        self.indices = list(range(len(positive_features)))
        # Optionally keep only the first `num_pairs` comparisons.
        if num_pairs is not None and num_pairs < len(self.indices):
            self.indices = self.indices[:num_pairs]

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        pair_id = self.indices[idx]
        return self.positive_features[pair_id], self.negative_features[pair_id]
class RewardModel(nn.Module):
    """MLP reward head mapping a feature vector to a scalar reward.

    Architecture: Linear(input_dim -> hidden_dim) -> ReLU -> [Dropout] ->
    Linear(hidden_dim -> 1).

    Args:
        input_dim: Dimensionality of the input feature vector.
        hidden_dim: Width of the single hidden layer.
        dropout_rate: Dropout probability applied after the hidden ReLU.
            Fixed defect: this argument was previously accepted but ignored.
    """

    def __init__(self, input_dim, hidden_dim=256, dropout_rate=0.0):
        super(RewardModel, self).__init__()
        layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU()]
        # Honor dropout_rate (previously unused). Inserted only when > 0 so
        # the Sequential layer indices — and hence state_dict keys — match
        # checkpoints from earlier dropout-free runs. nn.Dropout itself has
        # no parameters.
        if dropout_rate > 0:
            layers.append(nn.Dropout(dropout_rate))
        layers.append(nn.Linear(hidden_dim, 1))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # squeeze(-1) rather than squeeze(): a batch of size 1 keeps its
        # batch dimension instead of collapsing to a 0-d tensor.
        return self.model(x).squeeze(-1)
def bradley_terry_loss(reward_pos, reward_neg):
    """Negative log-likelihood of the Bradley-Terry preference model.

    P(pos preferred over neg) = sigmoid(r_pos - r_neg); the loss is
    -mean(log P). Uses logsigmoid instead of log(sigmoid(...)) so that a
    large negative margin does not underflow sigmoid to 0 and produce
    log(0) = -inf (fixed numerical-stability defect).

    Args:
        reward_pos: Rewards for the preferred (positive) responses.
        reward_neg: Rewards for the rejected (negative) responses.
    Returns:
        Scalar loss tensor.
    """
    # Preference probability of the positive over the negative response.
    logits = reward_pos - reward_neg
    return -torch.mean(nn.functional.logsigmoid(logits))
def train_reward_model(train_positive, train_negative, val_positive=None, val_negative=None,
                       batch_size=64, epochs=50, lr=1e-4, hidden_dim=256, dropout_rate=0.0,
                       early_stopping_patience=5, output_dir="reward_model_results", num_pairs=None):
    """
    Train a Bradley-Terry reward model with early stopping on validation loss.

    Args:
        train_positive: Positive (preferred) features for training, shape (N, D).
        train_negative: Negative (rejected) features for training, shape (N, D).
        val_positive: Validation positives; if None, the last 10% of the
            training pairs is held out instead.
        val_negative: Validation negatives (same convention as val_positive).
        batch_size: Batch size.
        epochs: Maximum number of epochs.
        lr: Adam learning rate.
        hidden_dim: Hidden layer size of the reward MLP.
        dropout_rate: Dropout rate of the reward MLP.
        early_stopping_patience: Epochs without val-loss improvement before stopping.
        output_dir: Directory for the best checkpoint and the loss-curve plot.
        num_pairs: Number of training pairs to use; if None, use all.
    Returns:
        The trained model with the best-validation-loss weights loaded.
    """
    os.makedirs(output_dir, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")
    if val_positive is None or val_negative is None:
        # Hold out the last 10% of the training pairs for validation.
        val_ratio = 0.1
        # max(1, ...): a tiny dataset must still yield a non-empty split;
        # previously a size of 0 made `train_positive[:-0]` silently empty
        # the training set (fixed defect).
        val_pos_size = max(1, int(len(train_positive) * val_ratio))
        val_neg_size = max(1, int(len(train_negative) * val_ratio))
        val_positive = train_positive[-val_pos_size:]
        val_negative = train_negative[-val_neg_size:]
        train_positive = train_positive[:-val_pos_size]
        train_negative = train_negative[:-val_neg_size]
    # Datasets and loaders
    train_dataset = BradleyTerryDataset(train_positive, train_negative, num_pairs=num_pairs)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # max(1, ...): num_pairs < 10 previously truncated validation to zero
    # pairs, which crashed the epoch average with a division by zero.
    val_dataset = BradleyTerryDataset(val_positive, val_negative,
                                      num_pairs=max(1, num_pairs // 10) if num_pairs else None)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    # Model and optimizer
    input_dim = train_positive.shape[1]
    model = RewardModel(input_dim, hidden_dim, dropout_rate).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # Training state
    best_val_loss = float('inf')
    patience_counter = 0
    train_losses = []
    val_losses = []
    print(f"Start training: {epochs} epochs, ~{len(train_loader)} batches/epoch")
    for epoch in range(epochs):
        # ---- Train ----
        model.train()
        train_loss = 0.0
        for pos_inputs, neg_inputs in tqdm(train_loader, desc=f"Epoch {epoch+1}/{epochs} [Train]"):
            pos_inputs, neg_inputs = pos_inputs.to(device), neg_inputs.to(device)
            reward_pos = model(pos_inputs)
            reward_neg = model(neg_inputs)
            loss = bradley_terry_loss(reward_pos, reward_neg)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Weight by batch size so the epoch average is per-sample.
            train_loss += loss.item() * pos_inputs.size(0)
        train_loss /= len(train_loader.dataset)
        train_losses.append(train_loss)
        # ---- Validate ----
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        with torch.no_grad():
            for pos_inputs, neg_inputs in tqdm(val_loader, desc=f"Epoch {epoch+1}/{epochs} [Val]"):
                pos_inputs, neg_inputs = pos_inputs.to(device), neg_inputs.to(device)
                reward_pos = model(pos_inputs)
                reward_neg = model(neg_inputs)
                loss = bradley_terry_loss(reward_pos, reward_neg)
                val_loss += loss.item() * pos_inputs.size(0)
                # Pairwise accuracy: the preferred response should score higher.
                val_correct += torch.sum(reward_pos > reward_neg).item()
                val_total += pos_inputs.size(0)
        val_loss /= len(val_loader.dataset)
        val_losses.append(val_loss)
        val_accuracy = val_correct / val_total
        print(f"Epoch {epoch+1}/{epochs} - Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Val Accuracy: {val_accuracy:.4f}")
        # ---- Early stopping on validation loss ----
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            patience_counter = 0
            # Checkpoint the best model seen so far.
            torch.save(model.state_dict(), f"{output_dir}/best_model.pt")
        else:
            patience_counter += 1
            if patience_counter >= early_stopping_patience:
                print(f"Early stopping at epoch {epoch+1}")
                break
    # Plot loss curves
    plt.figure(figsize=(10, 6))
    plt.plot(train_losses, label='Train Loss')
    plt.plot(val_losses, label='Val Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Train/Val Loss Curves')
    plt.legend()
    plt.grid(True)
    plt.savefig(f"{output_dir}/loss_curve.png", dpi=300)
    plt.close()
    # Reload the best checkpoint before returning (guard: no checkpoint
    # exists when epochs == 0).
    if os.path.exists(f"{output_dir}/best_model.pt"):
        model.load_state_dict(torch.load(f"{output_dir}/best_model.pt"))
    return model
def evaluate_best_of_n(model, test_features, golden_rewards=None, output_dir="best_of_n_results"):
    """
    Evaluate a reward model on multi-response data (best-of-n selection).

    For every sample the model scores each candidate response and the
    highest-scoring one is "selected"; the metric is the mean golden
    reward of the selected responses.

    Args:
        model: Trained reward model.
        test_features: Features with shape [n_samples, n_responses, n_layers, dim]
            or [n_samples, n_responses, dim]. For 4-D input only the last
            layer's features are used.
        golden_rewards: Ground-truth rewards [n_samples, n_responses], or None.
        output_dir: Output directory for the detailed per-sample JSON.
    Returns:
        results: Dict with 'mean_golden_reward', 'std_golden_reward',
        'n_samples' (mean/std are 0.0 when golden_rewards is None).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    os.makedirs(output_dir, exist_ok=True)
    # 4-D input carries per-layer features; keep only the last layer.
    if len(test_features.shape) == 4:
        n_samples, n_responses, n_layers, feature_dim = test_features.shape
        test_features = test_features[:, :, -1, :]
    else:
        n_samples, n_responses, feature_dim = test_features.shape
    selected_rewards = []
    detailed_results = {
        'sample_rewards': [],
        'selected_indices': [],
        'selected_golden_rewards': []
    }
    for i in tqdm(range(n_samples)):
        # Score all responses of sample i in one forward pass instead of
        # one model call per response (same values, fewer launches).
        with torch.no_grad():
            batch = torch.FloatTensor(test_features[i]).to(device)
            sample_rewards = model(batch).reshape(-1).cpu().tolist()
        # Index of the highest predicted reward.
        top_idx = int(np.argmax(sample_rewards))
        # Golden reward of the selected response, when available.
        selected_reward = None
        if golden_rewards is not None:
            selected_reward = float(golden_rewards[i, top_idx])
            selected_rewards.append(selected_reward)
        detailed_results['sample_rewards'].append(sample_rewards)
        detailed_results['selected_indices'].append(top_idx)
        # Fixed defect: this line previously raised NameError when
        # golden_rewards was None; now it records None instead.
        detailed_results['selected_golden_rewards'].append(selected_reward)
    # Mean/std golden reward of the selected responses.
    mean_reward = np.mean(selected_rewards) if selected_rewards else 0.0
    std_reward = np.std(selected_rewards) if selected_rewards else 0.0
    results = {
        'mean_golden_reward': mean_reward,
        'std_golden_reward': std_reward,
        'n_samples': n_samples
    }
    # Save per-sample details for offline analysis.
    with open(os.path.join(output_dir, 'best_of_n_detailed_results.json'), 'w') as f:
        json.dump(detailed_results, f, indent=2)
    return results
def calculate_lipschitz_constant(model):
    """
    Estimate the model's Lipschitz constant as the product of the spectral
    norms (largest singular values) of its linear layers. For an MLP with
    1-Lipschitz activations such as ReLU, this product is an upper bound
    on the network's Lipschitz constant.

    Args:
        model: Trained reward model (any nn.Module).
    Returns:
        lipschitz_constant: float, product of the linear layers' spectral norms.
    """
    total_lipschitz = 1.0
    for module in model.modules():
        if isinstance(module, nn.Linear):
            # Exact spectral norm via SVD. Replaces the previous hand-rolled
            # 100-step power iteration, which was approximate and depended on
            # the random initialization of the iteration vectors.
            sigma = torch.linalg.svdvals(module.weight.data)[0]
            total_lipschitz *= sigma.item()
    return total_lipschitz
def main():
    """Train a Bradley-Terry reward model end-to-end and evaluate best-of-n.

    Pipeline: parse CLI args -> seed RNGs -> load paired train features
    (optionally augmented with VAE reconstructions) -> train with
    best-of-n-validation-based early stopping -> evaluate the best
    checkpoint on the full multi-response test set -> report the model's
    Lipschitz constant.

    Returns:
        The model instance from the final training epoch.
    """
    # ---- CLI ----
    parser = argparse.ArgumentParser(description='Train Bradley-Terry reward model')
    parser.add_argument('--train_size', type=float, default=1000.0,
                        help='Training size in thousands (e.g., 10 => 10k, 0.1 => 100, -1 => all).')
    parser.add_argument('--batch_size', type=int, default=128, help='Batch size')
    parser.add_argument('--epochs', type=int, default=5, help='Epochs')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
    parser.add_argument('--hidden_dim', type=int, default=256, help='Hidden dim')
    parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate')
    parser.add_argument('--seed', type=int, default=132, help='Random seed')
    parser.add_argument('--vae_reconstructed_path', type=str, default=None, help='Path to VAE reconstructed features')
    parser.add_argument('--train_data_path', type=str, default='/nobackup2/taoleitian/neurips/embeddings/hh_rlhf/llama_instruct_10k/train_100k.npy', help='Train data path')
    parser.add_argument('--test_data_path', type=str, default='/nobackup2/taoleitian/neurips/embeddings/hh_rlhf/llama_instruct_10k/test.npy', help='Test data path')
    parser.add_argument('--multi_response_features_path', type=str, default='/nobackup2/taoleitian/neurips/embeddings/hh_rlhf/llama_instruct_10k/multi_response/multi_response_embeddings.npy', help='Multi-response features path')
    parser.add_argument('--multi_response_rewards_path', type=str, default='/nobackup2/taoleitian/neurips/embeddings/hh_rlhf/llama_instruct_10k/multi_response/multi_response_rewards.npy', help='Multi-response rewards path')
    parser.add_argument('--save_path', type=str, default='.', help='Output directory')
    args = parser.parse_args()
    # ---- Reproducibility ----
    set_seed(args.seed)
    # Training sample size, expressed in thousands (<= 0 means "use all").
    train_sample_size = args.train_size
    # ---- Output directories ----
    output_dir = args.save_path
    best_of_n_dir = os.path.join(args.save_path, "best_of_n")
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(best_of_n_dir, exist_ok=True)
    # ---- Save run configuration ----
    with open(f"{output_dir}/config.txt", "w") as f:
        if train_sample_size > 0:
            if train_sample_size.is_integer():
                f.write(f"Train size: {int(train_sample_size)}k\n")
            else:
                f.write(f"Train size: {train_sample_size}k\n")
        else:
            f.write(f"Train size: all\n")
        f.write(f"Batch size: {args.batch_size}\n")
        f.write(f"Epochs: {args.epochs}\n")
        f.write(f"Learning rate: {args.lr}\n")
        f.write(f"Hidden dim: {args.hidden_dim}\n")
        f.write(f"Dropout: {args.dropout}\n")
        f.write(f"Start time: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        if args.vae_reconstructed_path:
            f.write(f"VAE reconstructed path: {args.vae_reconstructed_path}\n")
    # ---- Load features ----
    print("Loading features...")
    train_features = np.load(args.train_data_path).astype(np.float32)
    # Optionally augment training data with VAE-reconstructed features.
    if args.vae_reconstructed_path and args.vae_reconstructed_path.lower() != "none":
        print("Loading VAE reconstructed features...")
        reconstructed_features = np.load(args.vae_reconstructed_path).astype(np.float32)
        print(f"VAE reconstructed shape: {reconstructed_features.shape}")
        train_features = np.concatenate([train_features, reconstructed_features], axis=0)
        print(f"Merged train features shape: {train_features.shape}")
    print(f"Train features shape: {train_features.shape}")
    # Multi-response test data for best-of-n evaluation.
    multi_response_features = np.load(args.multi_response_features_path).astype(np.float32)
    multi_response_rewards = np.load(args.multi_response_rewards_path).astype(np.float32)
    print(f"Multi-response features shape: {multi_response_features.shape}")
    print(f"Multi-response rewards shape: {multi_response_rewards.shape}")
    # ---- Random validation subset of the multi-response data ----
    total_samples = multi_response_features.shape[0]
    # Cap at the available count: np.random.choice with replace=False
    # raises when asked for more samples than exist (fixed defect).
    n_val_samples = min(2000, total_samples)
    val_indices = np.random.choice(total_samples, n_val_samples, replace=False)
    val_multi_response_features = multi_response_features[val_indices]
    val_multi_response_rewards = multi_response_rewards[val_indices]
    print(f"Val multi-response features shape: {val_multi_response_features.shape}")
    print(f"Val multi-response rewards shape: {val_multi_response_rewards.shape}")
    # ---- Split positive/negative pairs ----
    if len(train_features.shape) == 3:  # (n_samples, pos/neg, dim)
        train_positive_features = train_features[:, 0, :]
        train_negative_features = train_features[:, 1, :]
    elif len(train_features.shape) == 4:  # (n_samples, pos/neg, n_layers, dim)
        train_positive_features = train_features[:, 0, :, :]
        train_negative_features = train_features[:, 1, :, :]
    else:
        raise ValueError(f"Unsupported train feature shape: {train_features.shape}")
    # ---- Subsample training pairs per train_sample_size ----
    if train_sample_size > 0:
        # Convert thousands to an absolute count, capped at what exists.
        sample_count = int(train_sample_size * 1000)
        sample_count = min(sample_count, train_positive_features.shape[0])
        # Random indices rather than the first N, to avoid ordering bias.
        total_samples = train_positive_features.shape[0]
        random_indices = np.random.choice(total_samples, sample_count, replace=False)
        train_positive_features = train_positive_features[random_indices]
        train_negative_features = train_negative_features[random_indices]
        print(f"Randomly selected {train_sample_size}k samples for training (actual: {sample_count})")
    else:
        print(f"Using all training samples: {train_positive_features.shape[0]}")
    print(f"Train Positive shape: {train_positive_features.shape}")
    print(f"Train Negative shape: {train_negative_features.shape}")
    # ---- Keep only the last layer's features ----
    last_layer_idx = -1
    if len(train_positive_features.shape) == 3:  # (n_samples, n_layers, dim)
        train_positive_last_layer = train_positive_features[:, last_layer_idx, :]
        train_negative_last_layer = train_negative_features[:, last_layer_idx, :]
    elif len(train_positive_features.shape) == 2:  # (n_samples, dim)
        train_positive_last_layer = train_positive_features
        train_negative_last_layer = train_negative_features
    else:
        raise ValueError(f"Unsupported train feature shape: {train_positive_features.shape}")
    print(f"Using last layer features for training")
    # ---- Train the reward model ----
    print("\nStart training Bradley-Terry reward model...")
    input_dim = train_positive_last_layer.shape[1]
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = RewardModel(input_dim, args.hidden_dim, args.dropout).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    train_dataset = BradleyTerryDataset(train_positive_last_layer, train_negative_last_layer)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    best_val_score = float('-inf')  # best-of-n scores may be negative
    patience_counter = 0
    early_stopping_patience = 5
    best_model_state = None
    best_epoch = -1
    print("\nBegin training...")
    for epoch in range(args.epochs):
        model.train()
        train_loss = 0.0
        for pos_batch, neg_batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/{args.epochs} [Train]"):
            pos_batch, neg_batch = pos_batch.to(device), neg_batch.to(device)
            optimizer.zero_grad()
            reward_pos = model(pos_batch)
            reward_neg = model(neg_batch)
            loss = bradley_terry_loss(reward_pos, reward_neg)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss /= len(train_loader)
        # Validate via best-of-n golden reward on the held-out subset.
        print(f"\nEpoch {epoch+1}: validating with best-of-n...")
        model.eval()
        val_results = evaluate_best_of_n(
            model,
            val_multi_response_features,
            golden_rewards=val_multi_response_rewards,
            output_dir=os.path.join(output_dir, f"val_epoch_{epoch+1}")
        )
        val_score = val_results['mean_golden_reward']
        print(f"Epoch {epoch+1}: Train Loss = {train_loss:.4f}, Val Best-of-N Score = {val_score:.4f}")
        # Early stopping: higher golden reward is better.
        if val_score > best_val_score:
            best_val_score = val_score
            # Deep-copy to CPU so later epochs don't mutate the snapshot.
            best_model_state = {k: v.cpu().clone() for k, v in model.state_dict().items()}
            best_epoch = epoch + 1
            patience_counter = 0
            print(f"New best model found, Best-of-N: {best_val_score:.4f}")
        else:
            patience_counter += 1
            print(f"No improvement, patience: {patience_counter}/{early_stopping_patience}")
            if patience_counter >= early_stopping_patience:
                print(f"Early stopping triggered at epoch {epoch+1}")
                print(f"Best validation Best-of-N: {best_val_score:.4f} (Epoch {best_epoch})")
                break
    if best_model_state is None:
        # epochs == 0: fall back to the current weights so saving and
        # evaluation below still run instead of torch.save(None).
        best_model_state = {k: v.cpu().clone() for k, v in model.state_dict().items()}
    # ---- Save the best checkpoint ----
    print(f"\nSaving best model (Epoch {best_epoch}, Val score: {best_val_score:.4f})")
    torch.save(best_model_state, os.path.join(output_dir, "best_model.pt"))
    print(f"Best model saved to {os.path.join(output_dir, 'best_model.pt')}")
    # Fresh model instance loaded with the best weights for testing.
    test_model = RewardModel(input_dim, args.hidden_dim, args.dropout).to(device)
    test_model.load_state_dict(best_model_state)
    test_model.eval()
    # ---- Final best-of-n evaluation on the full test set ----
    print("\nEvaluating best-of-n on full test set with best model...")
    test_results = evaluate_best_of_n(
        test_model,
        multi_response_features,
        golden_rewards=multi_response_rewards,
        output_dir=best_of_n_dir
    )
    # Human-readable summary.
    with open(os.path.join(output_dir, "evaluation_results.txt"), "w") as f:
        f.write(f"Best-of-N evaluation results:\n")
        f.write(f"Num samples: {test_results['n_samples']}\n")
        f.write(f"Mean golden reward: {test_results['mean_golden_reward']:.4f}\n")
        f.write(f"Std golden reward: {test_results['std_golden_reward']:.4f}\n")
        f.write(f"Best epoch: {best_epoch}\n")
        f.write(f"Best validation score: {best_val_score:.4f}\n")
    # Machine-readable JSON summary of the gold-reward results.
    gold_reward_results = {
        # NOTE(review): a hard-coded +4.95 offset was previously added to
        # this field, silently inflating the JSON mean and making it
        # disagree with evaluation_results.txt. Report the raw value so
        # both outputs are consistent.
        "mean_golden_reward": float(test_results['mean_golden_reward']),
        "std_golden_reward": float(test_results['std_golden_reward']),
        "n_samples": int(test_results['n_samples']),
        "best_epoch": int(best_epoch),
        "best_validation_score": float(best_val_score)
    }
    with open(os.path.join(output_dir, "gold_reward_results.json"), "w") as f:
        json.dump(gold_reward_results, f, indent=2)
    print("\nFinal test results:")
    print(f"Best-of-N score: {test_results['mean_golden_reward']:.4f} ± {test_results['std_golden_reward']:.4f}")
    # ---- Lipschitz constant ----
    print("\nComputing model Lipschitz constant...")
    # Measure the best checkpoint rather than the last-epoch weights, so
    # the constant describes the same model whose results are reported.
    lipschitz_constant = calculate_lipschitz_constant(test_model)
    print(f"Lipschitz constant: {lipschitz_constant:.4f}")
    with open(f"{output_dir}/config.txt", "a") as f:
        f.write(f"Lipschitz constant: {lipschitz_constant:.4f}\n")
    return model
if __name__ == "__main__":
main()