-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgenerate.py
More file actions
465 lines (396 loc) · 18.2 KB
/
generate.py
File metadata and controls
465 lines (396 loc) · 18.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
import argparse
import json
import random
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Union

from datasets import load_dataset, Dataset
from tqdm import tqdm

from llm_utils import load_model, get_sampling_params, LiteLLMWrapper
def get_abstract_instruction(reasoning: bool = False) -> str:
    """Return the user-facing instruction for abstract generation.

    With ``reasoning=True`` the instruction adds a step-by-step thinking cue;
    otherwise it asks for the abstract directly.
    """
    if reasoning:
        return "Please generate an informative abstract for the above doctoral dissertation that follows academic standards and is approximately 400 words. Do not add extra comments or explanations. Think step by step and then reply with only the abstract."
    return "Please generate an informative abstract for the above doctoral dissertation that follows academic standards and is approximately 400 words. Do not add extra comments or explanations. Reply with only the abstract."
SYSTEM_PROMPT = {"role": "system", "content": "You are an academic assistant that generates concise doctoral dissertation abstracts."}
def create_abstract_prompt(clean_text: str, use_chat_template: bool = False, reasoning: bool = False) -> Union[str, List[Dict]]:
    """Build the generation prompt for a single dissertation.

    Args:
        clean_text: Full dissertation body text.
        use_chat_template: When True, return a chat ``messages`` list;
            otherwise return a plain completion-prompt string.
        reasoning: When True, use the step-by-step instruction and omit the
            system prompt (chat mode only).

    Returns:
        A messages list (chat mode) or a prompt string (completion mode).

    Fixes:
        * The completion-mode prompt previously closed with ``<Dissertation End>``
          while its opening tag (and the chat-mode prompt) used
          ``<Doctoral Dissertation ...>``; the closing tag is now consistent.
        * Return annotation corrected: the chat branch returns a messages list,
          not a string.
    """
    if use_chat_template:
        messages = []
        if not reasoning:
            # Reasoning runs go without the system turn.
            messages.append(SYSTEM_PROMPT)
        messages.append({"role": "user", "content": f"""<Doctoral Dissertation Start>
{clean_text}
<Doctoral Dissertation End>
{get_abstract_instruction(reasoning)}
"""})
        return messages
    else:
        prompt = f"""<Doctoral Dissertation Start>
{clean_text}
<Doctoral Dissertation End>
{get_abstract_instruction(reasoning)}
Abstract:
"""
        return prompt
def prepare_prompts(examples: List[Dict], use_chat_template: bool = False, reasoning: bool = False) -> List[str]:
    """Build one abstract-generation prompt per dataset example.

    Note: despite the ``List[str]`` annotation, chat-template mode yields
    message lists (see ``create_abstract_prompt``).

    Raises:
        KeyError: If an example lacks the required 'clean_text' field.
    """
    def _to_prompt(example: Dict):
        # Fail fast on malformed rows before calling the prompt builder.
        if 'clean_text' not in example:
            raise KeyError("Dataset must contain 'clean_text' field")
        return create_abstract_prompt(example['clean_text'], use_chat_template, reasoning)

    return [_to_prompt(example) for example in examples]
def load_dataset_split(split: str, dataset_path: str = None) -> Dataset:
    """Load the benchmark dataset from a parquet file and validate its schema.

    Args:
        split: Requested split name.  NOTE(review): currently unused — the
            parquet file is always loaded as a single 'train' split; confirm
            whether per-split files were intended.
        dataset_path: Path to the parquet file (required).

    Returns:
        The loaded :class:`Dataset`.

    Raises:
        ValueError: If no path is given, or required fields are missing.
        RuntimeError: If loading fails (original exception chained via ``from``).
    """
    if not dataset_path:
        raise ValueError("Dataset path is required")
    print(f"Loading dataset from: {dataset_path}")
    try:
        dataset = load_dataset("parquet", data_files=dataset_path, split='train')
    except Exception as e:
        # Chain the cause instead of discarding it (previously a bare
        # `raise Exception(...)` that also swallowed the validation error below).
        raise RuntimeError(f"Error loading dataset from {dataset_path}: {str(e)}") from e
    # Verify dataset has required fields before handing it to the benchmark.
    if not all(field in dataset[0] for field in ['clean_text', 'abstract']):
        raise ValueError("Dataset must contain 'clean_text' and 'abstract' fields")
    return dataset
def prepare_dataset_subset(dataset: Dataset, limit: int = None, random_sample: bool = False) -> Dataset:
    """Return the full dataset, its first *limit* rows, or *limit* random rows.

    A limit of None or 0 means "no subsetting".  The limit is clamped to the
    dataset size either way.
    """
    if not limit:  # None and 0 both mean: use everything
        return dataset
    count = min(limit, len(dataset))
    if random_sample:
        # Sample indices without replacement so no row appears twice.
        chosen = random.sample(range(len(dataset)), count)
    else:
        chosen = range(count)
    return dataset.select(chosen)
def run_benchmark(
    dataset: Dataset,
    engine,
    temperature: float = None,
    max_tokens: int = 1000,
    model_name: str = None,
    limit: int = None,
    random_sample: bool = False,
    output_file: Path = None,
    use_chat_template: bool = False,
    prefilled_assistant_message: bool = False,
    reasoning: bool = False,
    comment: str = None
) -> Path:
    """Run the benchmark on the given model and dataset.

    Subsets the dataset, builds prompts, dispatches generation to the
    LiteLLM (API) path or the vLLM path depending on the engine type, and
    writes generations plus run metadata to ``output_file``.

    Returns:
        The ``output_file`` path that was written.
    """
    dataset = prepare_dataset_subset(dataset, limit, random_sample)
    prompts = prepare_prompts(dataset, use_chat_template, reasoning)
    all_results = []
    # NOTE(review): `or` also replaces an explicit temperature of 0.0 with the
    # engine default — confirm that 0.0 is never an intended setting here.
    temperature = temperature or get_default_temperature(engine, max_tokens)
    # Run metadata is stored alongside the generations for reproducibility.
    metadata = {
        "model_name": model_name,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "use_chat_template": use_chat_template,
        "prefilled_assistant_message": prefilled_assistant_message,
        "reasoning": reasoning,
        "timestamp": datetime.now().isoformat(),
        "random_sample": random_sample,
        "sample_size": len(dataset),
        "comment": comment,
        # Example prompt recorded so the exact prompt shape is auditable later.
        "sample_input": create_abstract_prompt("Dissertation would be here", use_chat_template, reasoning)
    }
    # Execute generation based on engine type
    if isinstance(engine, LiteLLMWrapper):
        process_litellm_generation(engine, dataset, prompts, metadata,
                                   use_chat_template, prefilled_assistant_message,
                                   max_tokens, temperature, all_results)
    else:
        process_vllm_generation(engine, dataset, prompts, metadata,
                                use_chat_template, prefilled_assistant_message,
                                reasoning, max_tokens, temperature, all_results)
    write_output_file(output_file, metadata, all_results)
    return output_file
def get_default_temperature(engine, max_tokens: int) -> float:
    """Return the engine's default sampling temperature (0.0 when not exposed)."""
    default_params = get_sampling_params(engine, max_tokens, None)
    if hasattr(default_params, 'temperature'):
        return default_params.temperature
    return 0.0
def process_litellm_generation(engine, dataset, prompts, metadata,
                               use_chat_template, prefilled_assistant_message,
                               max_tokens, temperature, all_results):
    """Generate abstracts one request at a time through the LiteLLM API.

    Failures on individual requests are logged and skipped so a single bad
    request does not abort the whole run; successful results are appended to
    *all_results* in place.  NOTE(review): *metadata* is accepted but never
    used in this path — confirm whether it was meant to be recorded per call.
    """
    sampling_params = get_sampling_params(engine, max_tokens, temperature)
    for idx, prompt in enumerate(tqdm(prompts, desc="Generating abstracts")):
        try:
            messages = prepare_messages(
                prompt,
                use_chat_template,
                prefilled_assistant_message
            )
            response = engine.chat(
                messages=messages,
                sampling_params=sampling_params
            )[0]
            choice_message = response.choices[0].message
            all_results.append(create_result_entry(
                dataset[idx],
                choice_message.content,
                getattr(choice_message, 'reasoning_content', None)
            ))
        except Exception as e:
            handle_generation_error(e)
def process_vllm_generation(engine, dataset, prompts, metadata,
                            use_chat_template, prefilled_assistant_message,
                            reasoning, max_tokens, temperature, all_results):
    """Handle vLLM generation workflow.

    Batches all prompts into a single engine.chat/engine.generate call and
    appends parsed results to *all_results* in place.
    NOTE(review): *metadata* is accepted but never used in this path —
    confirm whether it was meant to be threaded through.
    """
    sampling_params = get_sampling_params(engine, max_tokens, temperature)
    print("Sampling params:", sampling_params)
    try:
        if use_chat_template:
            # Append prefill ("Abstract: ") and/or "<think>" continuation turns.
            prompts = prepare_chat_prompts(
                prompts,
                prefilled_assistant_message,
                reasoning
            )
        outputs = (
            engine.chat(
                messages=prompts,
                sampling_params=sampling_params,
                use_tqdm=True,
                # When a continuation turn was appended above, the chat template
                # must continue that final assistant message rather than open a
                # fresh generation turn — hence these two flags are complements.
                add_generation_prompt=not prefilled_assistant_message and not reasoning,
                continue_final_message=prefilled_assistant_message or reasoning,
                chat_template_content_format="string"
            )
            if use_chat_template
            else engine.generate(prompts, sampling_params, use_tqdm=True)
        )
        process_vllm_outputs(outputs, dataset, all_results)
    except Exception as e:
        handle_generation_error(e, full_traceback=True)
def prepare_messages(prompt, use_chat_template, prefilled_assistant):
    """Build the messages list for an API call.

    Without a chat template the prompt string becomes a single user turn.
    With one, *prompt* is already a messages list; optionally an assistant
    turn containing "Abstract: " is appended so the model continues from it.
    """
    if not use_chat_template:
        return [{"role": "user", "content": prompt}]
    messages = list(prompt)  # shallow copy; caller's list stays untouched
    if prefilled_assistant:
        messages.append({"role": "assistant", "content": "Abstract: "})
    return messages
def prepare_chat_prompts(prompts, prefilled_assistant, reasoning):
    """Append continuation turns to each chat prompt.

    When requested, a prefilled "Abstract: " assistant turn and/or an opening
    think-tag assistant turn is added, so the model continues the final
    message instead of starting a new one.
    """
    prepared = []
    for prompt in prompts:
        updated = list(prompt)
        if prefilled_assistant:
            updated.append({"role": "assistant", "content": "Abstract: "})
        if reasoning:
            updated.append({"role": "assistant", "content": "<think>\n"})
        prepared.append(updated)
    return prepared
def process_vllm_outputs(outputs, dataset, all_results):
    """Convert raw vLLM outputs into result entries, truncated to dataset length."""
    for idx, output in enumerate(outputs):
        if idx >= len(dataset):
            break
        text = output.outputs[0].text
        # When the model emitted a </think> tag, keep only what follows it as
        # the abstract; the part before the tag is the reasoning section.
        if "</think>" in text:
            abstract = text.split("</think>")[1].strip()
        else:
            abstract = text
        all_results.append(create_result_entry(
            dataset[idx],
            abstract,
            extract_reasoning_content(text)
        ))
def create_result_entry(dataset_item, generated_text, reasoning_content):
    """Assemble the standardized per-example result record.

    Pulls identifying and length fields from the dataset row and attaches the
    model output plus any extracted reasoning text.
    """
    return dict(
        idn=dataset_item['id'],
        category=dataset_item['category'],
        total_text_length=dataset_item['total_text_length'],
        total_token_length=dataset_item['token_length'],
        abstract_length=dataset_item['abstract_length'],
        generated_abstract=generated_text,
        reasoning_content=reasoning_content,
        original_abstract=dataset_item['abstract'],
        url_dnb_archive=dataset_item['url_dnb_archive'],
    )
def extract_reasoning_content(generated_text: str) -> Optional[str]:
    """Extract the reasoning ("<think>...</think>") section from generated text.

    Returns:
        The text before the first closing tag, with any opening "<think>" tag
        removed and surrounding whitespace stripped, or None when no closing
        tag is present.  (The previous ``-> str`` annotation did not reflect
        the None case.)
    """
    if "</think>" not in generated_text:
        return None
    return generated_text.split("</think>")[0].replace("<think>", "").strip()
def handle_generation_error(error: Exception, full_traceback: bool = False):
    """Report a generation failure on stdout.

    Always prints the error message; with ``full_traceback=True`` the current
    traceback is printed as well.
    """
    print(f"Error during generation: {str(error)}")
    if not full_traceback:
        return
    import traceback
    print("Traceback:")
    print(traceback.format_exc())
def write_output_file(output_file: Path, metadata: dict, results: list):
    """Serialize run metadata and generations to *output_file* as indented JSON."""
    payload = {
        "generation_metadata": metadata,
        "generations": results
    }
    with open(output_file, "w") as f:
        json.dump(payload, f, indent=2)
# Helper functions for main ----------------------------------------------------
def create_temperature_configs(temperatures: list, max_tokens: int, limit: int) -> list:
    """Build one plain-prompt (no chat template, no reasoning) run config per temperature."""
    configs = []
    for temp in temperatures:
        configs.append({
            "temperature": temp,
            "max_tokens": max_tokens,
            "limit": limit,
            "use_chat_template": False,
            "prefilled_assistant_message": False,
            "reasoning": False,
            "comment": f"temperature={temp}"
        })
    return configs
def should_skip_variation(variation: str, temperatures: list) -> bool:
    """Return True when the "none" variation is redundant.

    The "none" (plain-prompt) variation is skipped whenever temperature
    experiments already cover the plain-prompt case.  The previous version
    returned the ``temperatures`` list itself on that path, violating the
    ``-> bool`` annotation; ``bool()`` normalizes the truthiness.
    """
    return variation == "none" and bool(temperatures)
def create_chat_template_config(variation: str, base_temp: float, max_tokens: int, limit: int) -> dict:
    """Build a run config for one chat-template variation.

    Any variation other than "none" enables the chat template; the prefill
    and reasoning flags are enabled only for their specific variations.
    """
    uses_template = variation in {"template", "template+prefill", "template+reasoning"}
    return {
        "temperature": base_temp,
        "max_tokens": max_tokens,
        "limit": limit,
        "use_chat_template": uses_template,
        "prefilled_assistant_message": variation == "template+prefill",
        "reasoning": variation == "template+reasoning",
        "comment": f"chat_template={variation}"
    }
def apply_config_comments(configs: list, comments: list):
    """Overwrite each config's comment with the user-supplied one, in order.

    Pairing stops at the shorter of the two lists (same truncation as zip),
    so extra configs keep their generated comments.
    """
    for position, comment in enumerate(comments[:len(configs)]):
        configs[position]["comment"] = comment
def create_default_config(max_tokens: int, limit: int) -> dict:
    """Fallback configuration: plain prompt, engine-default temperature."""
    return dict(
        temperature=None,
        max_tokens=max_tokens,
        limit=limit,
        use_chat_template=False,
        prefilled_assistant_message=False,
        reasoning=False,
        comment="default configuration",
    )
def create_output_file(output_dir: Path, model: str, config: dict) -> Path:
    """Generate a unique output filename with incremental numbering.

    Scans ``output_dir`` for existing ``<model>_<N>.json`` files and returns
    the next free number.  Files whose suffix is not a plain integer (e.g.
    ``model_final.json``) are ignored — previously such a file crashed the
    ``int()`` conversion with a ValueError.

    Args:
        output_dir: Directory in which result files live.
        model: Model name; only the last '/'-separated segment is used.
        config: Run configuration (currently unused; kept for interface
            compatibility with callers).
    """
    model_base = model.split('/')[-1]
    existing = output_dir.glob(f"{model_base}_*.json")
    numeric_suffixes = (
        f.stem.rsplit('_', 1)[-1] for f in existing
    )
    next_num = max(
        (int(s) for s in numeric_suffixes if s.isdigit()),
        default=0
    ) + 1
    return output_dir / f"{model_base}_{next_num}.json"
def format_timedelta(delta: timedelta) -> str:
    """Render *delta* as 'Hh Mm Ss', truncating fractional seconds."""
    secs = delta.total_seconds()
    h = int(secs // 3600)
    m = int((secs % 3600) // 60)
    s = int(secs % 60)
    return f"{h}h {m}m {s}s"
def print_config(config: dict):
    """Pretty-print an experiment configuration, skipping unset (None) values."""
    print("📝 Configuration:")
    for key, value in config.items():
        if value is None:
            continue
        label = key.replace('_', ' ').title()
        print(f" - {label:<25}: {value}")
def main():
    """CLI entry point: parse arguments, build experiment configs, run them all."""
    # Argument parsing ---------------------------------------------------------
    parser = argparse.ArgumentParser(
        description="Run LLM benchmark for dissertation abstract generation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Core configuration
    parser.add_argument("--model", "-m",
                        type=str, default="openai/gpt-4",
                        help="HuggingFace or OpenAI model name (prefix with 'openai/' for OpenAI models)")
    parser.add_argument("--split", "-s",
                        type=str, default="test",
                        choices=["train", "validation", "test"],
                        help="Dataset split to evaluate on")
    parser.add_argument("--output-dir", "-o",
                        type=str, default="results",
                        help="Directory to save benchmark results")
    # Experimental parameters
    experiment_group = parser.add_argument_group("Experiment configuration")
    experiment_group.add_argument("--temperatures", "-t",
                                  type=float, nargs="+",
                                  help="Temperature values to test (comma separated)")
    experiment_group.add_argument("--max-tokens", "-mt",
                                  type=int, default=1000,
                                  help="Maximum tokens per generation")
    experiment_group.add_argument("--chat-template-variations", "-ct",
                                  type=str, nargs="+",
                                  choices=["none", "template", "template+prefill", "template+reasoning"],
                                  default=["none"],
                                  help="Chat template formatting variations to test")
    experiment_group.add_argument("--comments", "-c",
                                  type=str, nargs="+",
                                  help="Descriptive comments for each experiment configuration")
    # Runtime configuration
    # NOTE(review): --batch-size is parsed but never referenced below — confirm intent.
    parser.add_argument("--batch-size", "-b",
                        type=int, default=1,
                        help="Inference batch size (for compatible engines)")
    parser.add_argument("--limit", "-l",
                        type=int,
                        help="Maximum number of examples to process")
    parser.add_argument("--dataset-path", "-d",
                        type=str,
                        help="Path to custom dataset parquet file")
    parser.add_argument("--api",
                        action="store_true",
                        help="Use API-based models via LiteLLM")
    args = parser.parse_args()
    # Initial setup ------------------------------------------------------------
    output_dir = Path(args.output_dir)
    output_dir.mkdir(exist_ok=True, parents=True)
    # Load core components
    print(f"🔧 Loading model: {args.model}")
    engine = load_model(args.model, use_api=args.api)
    print(f"📂 Loading dataset: {args.split}")
    dataset = load_dataset_split(
        args.split,
        dataset_path=args.dataset_path
    )
    # Configure experiment variations ------------------------------------------
    run_configs = []
    # Temperature-based experiments
    if args.temperatures:
        run_configs.extend(
            create_temperature_configs(args.temperatures, args.max_tokens, args.limit)
        )
    # Chat template experiments
    for variation in args.chat_template_variations:
        # "none" is redundant once temperature runs cover the plain-prompt case.
        if should_skip_variation(variation, args.temperatures):
            continue
        run_configs.append(
            create_chat_template_config(
                variation,
                # Chat-template runs reuse the first requested temperature.
                base_temp=args.temperatures[0] if args.temperatures else None,
                max_tokens=args.max_tokens,
                limit=args.limit
            )
        )
    # Apply custom comments if provided
    if args.comments:
        apply_config_comments(run_configs, args.comments)
    # Ensure at least one configuration exists
    if not run_configs:
        run_configs.append(create_default_config(args.max_tokens, args.limit))
    # Execute experiments ------------------------------------------------------
    total_start = datetime.now()
    print(f"\n🚀 Starting {len(run_configs)} experiment configurations")
    for run_idx, config in enumerate(run_configs, 1):
        run_start = datetime.now()
        print(f"\n=== Experiment {run_idx}/{len(run_configs)} ===")
        print_config(config)
        # Create unique output filename
        output_file = create_output_file(output_dir, args.model, config)
        # Execute benchmark
        run_benchmark(
            dataset=dataset,
            engine=engine,
            output_file=output_file,
            **config
        )
        # Report timing
        print(f"⏱️ Experiment duration: {format_timedelta(datetime.now() - run_start)}")
        print(f"💾 Results saved to: {output_file}")
    # Final report -------------------------------------------------------------
    print(f"\nAll experiments completed in {format_timedelta(datetime.now() - total_start)}")
    print(f"Output directory: {output_dir.resolve()}")


if __name__ == "__main__":
    main()