#!/usr/bin/env python
# coding=utf-8
#
# author: Hans-Michael Muller
#
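# Example invocation (argument order follows the argparse setup below; the
# dataset, output directory, and model names here are illustrative only):
#   python train_test_model.py dataset.tsv output_dir bert-base-uncased 3
#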
import os
import argparse
from random import shuffle
import evaluate
import numpy as np
import torch
from datasets import Dataset
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
from transformers import AutoTokenizer
from transformers import DataCollatorWithPadding
from sklearn.model_selection import StratifiedKFold


def compute_metrics(eval_pred):
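    """Compute accuracy, binary F1, recall, and precision from a Trainer EvalPrediction."""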
accuracy = evaluate.load("accuracy")
f1 = evaluate.load("f1")
recall = evaluate.load("recall")
precision = evaluate.load("precision")
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
acc = accuracy.compute(predictions=predictions, references=labels)
f1_score = f1.compute(predictions=predictions, references=labels, average='binary')
rec = recall.compute(predictions=predictions, references=labels, average='binary')
prec = precision.compute(predictions=predictions, references=labels, average='binary')
    return {**f1_score, **prec, **rec, **acc}


def load_dataset_from_file(dataset_path, class_column_num):
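    """Load a tab-separated dataset and return (sentences, labels) for the given label column."""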
    # Read the TSV, drop the header row, and shuffle before splitting
    with open(dataset_path) as f:
        dataset = [line.strip().split("\t") for line in f][1:]
    shuffle(dataset)  # in-place and unseeded, so fold assignments differ between runs
    sentences = [line[0] for line in dataset]
    labels = [int(line[class_column_num]) for line in dataset]
    return sentences, labels


def main():
# Argument parsing
parser = argparse.ArgumentParser(description="Train a sequence classification model.")
parser.add_argument("dataset_name", type=str, help="Path to the csv dataset.")
parser.add_argument("output_dir_name", type=str, help="Directory to save the model and tokenizer.")
parser.add_argument("model_name", type=str, help="Huggingface model to finetune for classification")
parser.add_argument("num_train_epochs", type=int, help="Number of training epochs.")
args = parser.parse_args()
model_name = args.model_name
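
    # Train one binary classifier per curation task; each task's 0/1 label
    # lives in a different column of the dataset file.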
for classification_task, classification_column in [('fully_curatable', 1), ('partially_curatable', 2),
('language_related', 3)]:
sentences, labels = load_dataset_from_file(args.dataset_name, classification_column)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        encodings = tokenizer(sentences, truncation=True)  # no padding here; DataCollatorWithPadding pads per batch
        encodings["labels"] = torch.tensor(labels)
        tokenized_dataset = Dataset.from_dict(encodings)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
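
        # Cross-validation training config: evaluate every 500 steps and
        # checkpoint once per epoch.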
training_args = TrainingArguments(
output_dir=os.path.join(args.output_dir_name, classification_task),
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
num_train_epochs=args.num_train_epochs,
weight_decay=0.01,
eval_strategy="steps",
eval_steps=500,
save_strategy="epoch",
)
precision_scores = []
recall_scores = []
f1_scores = []
        # Stratified 5-fold cross-validation: each fold preserves the class
        # distribution of the full dataset
        skf = StratifiedKFold(n_splits=5)
for train_index, test_index in skf.split(tokenized_dataset["input_ids"], tokenized_dataset["labels"]):
train_dataset = tokenized_dataset.select(train_index)
eval_dataset = tokenized_dataset.select(test_index)
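            # Fresh model per fold so no weights leak across folds;
            # ignore_mismatched_sizes lets a new 2-label head replace the pretrained one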
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2,
ignore_mismatched_sizes=True)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
trainer.train()
metrics = trainer.evaluate()
precision_scores.append(metrics["eval_precision"])
recall_scores.append(metrics["eval_recall"])
f1_scores.append(metrics["eval_f1"])
        # Aggregate the fold metrics (mean and standard deviation)
avg_precision = np.mean(precision_scores)
avg_recall = np.mean(recall_scores)
avg_f1 = np.mean(f1_scores)
std_precision = np.std(precision_scores)
std_recall = np.std(recall_scores)
std_f1 = np.std(f1_scores)
print(f"Classification task: {classification_task}")
print(f"Precision scores: {precision_scores}")
print(f"Recall scores: {recall_scores}")
print(f"F1 Scores: {f1_scores}")
print(f"Average Precision: {avg_precision:5.4f} ± {std_precision:5.4f}")
print(f"Average Recall: {avg_recall:5.4f} ± {std_recall:5.4f}")
print(f"Average F1 Score: {avg_f1:5.4f} ± {std_f1:5.4f}")
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2,
ignore_mismatched_sizes=True)
training_args = TrainingArguments(
output_dir=os.path.join(args.output_dir_name, classification_task),
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
num_train_epochs=args.num_train_epochs,
weight_decay=0.01,
save_strategy="epoch",
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_dataset,
tokenizer=tokenizer,
data_collator=data_collator
)
trainer.train()
        trainer.save_model()  # writes the fine-tuned model (and tokenizer) to output_dir/<classification_task>


if __name__ == "__main__":
main()