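"""Evaluate a SmoothQuant fake-quantized Qwen2.5-0.5B model on last-word
prediction accuracy (LAMBADA)."""
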
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM

from smoothquant.smooth import smooth_lm
from smoothquant.fake_quant_2_bits import quantize_opt as quantize_opt_2
from smoothquant.fake_quant_4_bits import quantize_opt as quantize_opt_4
from smoothquant.fake_quant_6_bits import quantize_opt as quantize_opt_6
from smoothquant.fake_quant_8_bits import quantize_opt as quantize_opt_8


class Evaluator:
    def __init__(self, dataset, tokenizer, device):
        self.dataset = dataset
        self.tokenizer = tokenizer
        self.device = device

        # tokenize the dataset
        def tokenize_function(examples):
            example = self.tokenizer(examples["text"])
            return example

        self.dataset = self.dataset.map(tokenize_function, batched=True)
        self.dataset.set_format(type="torch", columns=["input_ids"])

    @torch.no_grad()
    def evaluate(self, model):
        model.eval()
        # The task is to predict the last word of the input.
        total, hit = 0, 0
        for batch in self.dataset:
            input_ids = batch["input_ids"].to(self.device).unsqueeze(0)
            label = input_ids[:, -1]
            outputs = model(input_ids)
            # Check the sequence length to avoid an IndexError.
            sequence_length = outputs.logits.size(1)
            if sequence_length < 2:
                # If the sequence is shorter than 2 tokens, use the last position.
                last_token_logits = outputs.logits[:, -1, :]
            else:
                # Otherwise, use the second-to-last position, whose logits
                # predict the final token.
                last_token_logits = outputs.logits[:, -2, :]
            pred = last_token_logits.argmax(dim=-1)
            total += label.size(0)
            hit += (pred == label).sum().item()
        acc = hit / total
        return acc


tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
# dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
# dataset = dataset.select(range(1000))  # take the first 1000 examples
dataset = load_dataset("lambada", split="validation[:1000]")  # LAMBADA dataset
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B", torch_dtype=torch.float16
).to("cuda:0")  # move the entire model to cuda:0
evaluator = Evaluator(dataset, tokenizer, "cuda:0")  # use the same device for the Evaluator

# acc_fp16 = evaluator.evaluate(model)
# print(f"Qwen2.5 0.5B model accuracy: {acc_fp16}")
act_scales = torch.load("act_scales/Qwen2.5-0.5b.pt")
smooth_lm(model, act_scales, 0.85)
model_smoothquant = quantize_opt_2(model).to("cuda:0")  # move back to the device after SmoothQuant
acc_smoothquant = evaluator.evaluate(model_smoothquant)
print(f"SmoothQuant 2-bit quantized model accuracy: {acc_smoothquant}")

# Alternative: 8-bit (W8A8) run with device_map="auto".
# model = AutoModelForCausalLM.from_pretrained(
#     "Qwen/Qwen2.5-0.5B", torch_dtype=torch.float16, device_map="auto"
# )
# acc_fp16 = evaluator.evaluate(model)
# print(f"Qwen2.5 0.5B model accuracy: {acc_fp16}")
# act_scales = torch.load("act_scales/Qwen2.5-0.5b.pt")
# smooth_lm(model, act_scales, 0.85)
# model_smoothquant = quantize_opt_8(model)
# print(model_smoothquant)
# acc_smoothquant = evaluator.evaluate(model_smoothquant)
# print(f"SmoothQuant W8A8 quantized model accuracy: {acc_smoothquant}")
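
# A minimal sketch for sweeping the other imported bit-widths. This assumes
# each quantize_opt_* shares the signature of quantize_opt_2 and that
# quantization mutates the model in place, so a fresh FP16 model is re-loaded
# and re-smoothed before each run.
# for bits, quantize in [(4, quantize_opt_4), (6, quantize_opt_6), (8, quantize_opt_8)]:
#     model = AutoModelForCausalLM.from_pretrained(
#         "Qwen/Qwen2.5-0.5B", torch_dtype=torch.float16
#     ).to("cuda:0")
#     smooth_lm(model, act_scales, 0.85)
#     model_q = quantize(model).to("cuda:0")
#     acc = evaluator.evaluate(model_q)
#     print(f"SmoothQuant {bits}-bit quantized model accuracy: {acc}")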