# llm_evaluator.py
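"""LLM-based evaluator for the search engine.

Uses a chat-completion API (SiliconFlow by default) to score search results
on relevance, accuracy, and quality, and to generate improvement suggestions
from the aggregated scores.
"""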
import json
import re
import time
from typing import Dict, List

import requests
from bs4 import BeautifulSoup


class LLMEvaluator:
    """Uses an LLM to evaluate and improve the search engine."""

    def __init__(self, search_engine, api_token=None, model_name="Qwen/Qwen2.5-72B-Instruct", api_url="https://api.siliconflow.cn/v1/chat/completions"):
        """
        Initialize the LLM evaluator.

        Args:
            search_engine: Search engine instance.
            api_token: SiliconFlow API token.
            model_name: Name of the LLM model to use.
            api_url: API endpoint URL.
        """
        self.search_engine = search_engine
        self.api_token = api_token
        self.model_name = model_name
        self.api_url = api_url
    def call_llm(self, prompt: str, system_prompt: str = None, api_token: str = None, model_name: str = None, api_url: str = None) -> str:
        """Call the LLM API and return the response text (or an error message)."""
        try:
            messages = []
            if system_prompt:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": prompt})
            payload = {
                "model": model_name or self.model_name,
                "messages": messages,
                "temperature": 0.1,  # low temperature for faster, more deterministic responses
                "max_tokens": 800,   # cap output tokens to speed up generation
                "stream": False
            }
            token = api_token or self.api_token
            url = api_url or self.api_url
            headers = {
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json"
            }
            response = requests.post(url, json=payload, headers=headers, timeout=60)  # generous timeout
            if response.status_code == 200:
                data = response.json()
                return data['choices'][0]['message']['content']
            else:
                return f"API call failed: {response.status_code}"
        except Exception as e:
            return f"Error while calling the LLM: {str(e)}"
    def evaluate_single_result(self, query: str, result: Dict, api_token: str = None, model_name: str = None, api_url: str = None) -> Dict:
        """Evaluate a single search result against the query."""
        prompt = f"""Please evaluate the relevance of the following search result to the query.

Query: {query}

Search result:
- Title: {result['title']}
- URL: {result['url']}
- Content preview: {result.get('preview', 'none')}

Rate it quickly on the following dimensions (1-5):
1. Relevance  2. Accuracy  3. Quality

Return JSON directly (no other text):
{{
    "relevance": score,
    "accuracy": score,
    "quality": score,
    "explanation": "one-sentence explanation"
}}"""
        system_prompt = "You are a search-engine quality assessment expert, skilled at judging the quality of search results."
        response = self.call_llm(prompt, system_prompt, api_token=api_token, model_name=model_name, api_url=api_url)
        # Try to extract and parse the JSON object from the response; fall back
        # to zero scores with the raw response as the explanation.
        fallback = {
            "relevance": 0,
            "accuracy": 0,
            "quality": 0,
            "explanation": response
        }
        try:
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                return json.loads(json_match.group())
            return fallback
        except Exception:
            return fallback
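    # On success the method returns the model's JSON verbatim, e.g. (illustrative
    # values only): {"relevance": 4, "accuracy": 5, "quality": 4,
    # "explanation": "Official page, directly on topic"}; on any API or parse
    # failure it returns zeros with the raw response as the explanation.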
    def evaluate_search_results(self, query: str, top_k: int = 10, api_token: str = None, model_name: str = None, api_url: str = None) -> Dict:
        """
        Evaluate the quality of the search results.

        Args:
            query: Query string.
            top_k: Number of top results to evaluate.
            api_token: API token to use for this call.
            model_name: Model name.
            api_url: API endpoint URL.

        Returns:
            Dictionary with the evaluation results.
        """
        print(f"\n{'='*60}")
        print(f"LLM evaluator - evaluating query: {query}")
        print('='*60)
        # Fetch the search results.
        print("Fetching search results...")
        results = self.search_engine.search(query, top_k=top_k, use_ltr=True)
        if not results:
            return {
                'query': query,
                'success': False,
                'message': 'No search results found'
            }
        # Evaluate each result.
        evaluations = []
        print(f"Evaluating {len(results)} search results...\n")
        for idx, (url, score, title, html_content) in enumerate(results, 1):
            print(f"Evaluating result {idx}/{len(results)}: {title[:50]}...")
            # Extract a plain-text preview from the HTML.
            try:
                soup = BeautifulSoup(html_content, 'html.parser')
                text = ' '.join(soup.get_text().split())
                preview = text[:200]
            except Exception:
                preview = ""
            result_info = {
                'rank': idx,
                'url': url,
                'title': title,
                'score': score,
                'preview': preview
            }
            # Ask the LLM for an evaluation.
            evaluation = self.evaluate_single_result(query, result_info, api_token=api_token, model_name=model_name, api_url=api_url)
            evaluation['result'] = result_info
            evaluations.append(evaluation)
        # Compute average scores.
        avg_relevance = sum(e['relevance'] for e in evaluations) / len(evaluations)
        avg_accuracy = sum(e['accuracy'] for e in evaluations) / len(evaluations)
        avg_quality = sum(e['quality'] for e in evaluations) / len(evaluations)
        result = {
            'query': query,
            'success': True,
            'total_results': len(results),
            'evaluations': evaluations,
            'average_scores': {
                'relevance': round(avg_relevance, 2),
                'accuracy': round(avg_accuracy, 2),
                'quality': round(avg_quality, 2),
                'overall': round((avg_relevance + avg_accuracy + avg_quality) / 3, 2)
            }
        }
        print("\nEvaluation complete!")
        print(f"Average relevance: {avg_relevance:.2f}/5")
        print(f"Average accuracy: {avg_accuracy:.2f}/5")
        print(f"Average quality: {avg_quality:.2f}/5")
        print(f"Overall score: {result['average_scores']['overall']:.2f}/5")
        print('='*60 + "\n")
        return result
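    # Typical call, assuming `evaluator` wraps an initialized SearchEngine:
    #
    #   report = evaluator.evaluate_search_results("图书馆", top_k=5)
    #   if report['success']:
    #       print(report['average_scores']['overall'])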
    def evaluate_search_results_stream(self, query: str, top_k: int = 10, api_token: str = None, model_name: str = None, api_url: str = None):
        """
        Evaluate search result quality as a stream (generator, intended for SSE).

        Args:
            query: Query string.
            top_k: Number of top results to evaluate.
            api_token: API token to use for this call.
            model_name: Model name.
            api_url: API endpoint URL.

        Yields:
            Event dictionaries containing a 'type' field and related data.
        """
        print(f"\n{'='*60}")
        print(f"LLM evaluator - streaming evaluation of query: {query}")
        print('='*60)
        # 1. Fetch the search results.
        yield {'type': 'progress', 'message': 'Fetching search results...'}
        print("Fetching search results...")
        results = self.search_engine.search(query, top_k=top_k, use_ltr=True)
        if not results:
            yield {
                'type': 'error',
                'message': 'No search results found'
            }
            return
        # 2. Start the evaluation.
        yield {
            'type': 'progress',
            'message': f'Evaluating {len(results)} search results...'
        }
        print(f"Evaluating {len(results)} search results...\n")
        evaluations = []
        # 3. Evaluate each result in turn.
        for idx, (url, score, title, html_content) in enumerate(results, 1):
            # Report progress.
            yield {
                'type': 'progress',
                'message': f'Evaluating result {idx}/{len(results)}: {title[:30]}...',
                'current': idx,
                'total': len(results)
            }
            print(f"Evaluating result {idx}/{len(results)}: {title[:50]}...")
            # Extract a plain-text preview from the HTML.
            try:
                soup = BeautifulSoup(html_content, 'html.parser')
                text = ' '.join(soup.get_text().split())
                preview = text[:200]
            except Exception:
                preview = ""
            result_info = {
                'rank': idx,
                'url': url,
                'title': title,
                'score': score,
                'preview': preview
            }
            # Ask the LLM for an evaluation.
            evaluation = self.evaluate_single_result(query, result_info, api_token=api_token, model_name=model_name, api_url=api_url)
            evaluation['result'] = result_info
            evaluations.append(evaluation)
        # 4. Compute average scores.
        avg_relevance = sum(e['relevance'] for e in evaluations) / len(evaluations)
        avg_accuracy = sum(e['accuracy'] for e in evaluations) / len(evaluations)
        avg_quality = sum(e['quality'] for e in evaluations) / len(evaluations)
        result = {
            'query': query,
            'success': True,
            'total_results': len(results),
            'evaluations': evaluations,
            'average_scores': {
                'relevance': round(avg_relevance, 2),
                'accuracy': round(avg_accuracy, 2),
                'quality': round(avg_quality, 2),
                'overall': round((avg_relevance + avg_accuracy + avg_quality) / 3, 2)
            }
        }
        # 5. Emit the full result.
        yield {
            'type': 'result',
            'data': result
        }
        print("\nEvaluation complete!")
        print(f"Average relevance: {avg_relevance:.2f}/5")
        print(f"Average accuracy: {avg_accuracy:.2f}/5")
        print(f"Average quality: {avg_quality:.2f}/5")
        print(f"Overall score: {result['average_scores']['overall']:.2f}/5")
        print('='*60 + "\n")
        # 6. Emit the completion signal.
        yield {'type': 'done'}
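    # A minimal sketch of wiring this generator to a Server-Sent Events endpoint.
    # Flask is only an assumption here; any framework that can stream a response
    # works the same way (one "data: ...\n\n" frame per yielded event):
    #
    #   from flask import Response, request, stream_with_context
    #
    #   @app.route('/evaluate_stream')
    #   def evaluate_stream():
    #       def generate():
    #           for event in evaluator.evaluate_search_results_stream(request.args['q']):
    #               yield f"data: {json.dumps(event, ensure_ascii=False)}\n\n"
    #       return Response(stream_with_context(generate()), mimetype='text/event-stream')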
    def generate_improvement_suggestions(self, evaluation_results: List[Dict], api_token: str = None, model_name: str = None, api_url: str = None) -> str:
        """Generate improvement suggestions based on the evaluation results."""
        # Summarize the evaluation results.
        summary = []
        for result in evaluation_results:
            if result['success']:
                summary.append(f"""
Query: {result['query']}
Overall score: {result['average_scores']['overall']}/5
- Relevance: {result['average_scores']['relevance']}/5
- Accuracy: {result['average_scores']['accuracy']}/5
- Quality: {result['average_scores']['quality']}/5
Low-scoring examples:
{self._get_low_score_examples(result['evaluations'])}
""")
        summary_text = "\n".join(summary)
        prompt = f"""As a search engine optimization expert, please propose concrete improvements based on the following LLM evaluation results.

Evaluation summary:
{summary_text}

Please suggest improvements in the following areas:
1. Ranking algorithm optimization
2. Feature engineering improvements
3. Index quality enhancement
4. Query understanding enhancement
5. Other relevant suggestions

Give specific, actionable suggestions and explain their expected impact."""
        system_prompt = "You are a search engine optimization expert, skilled at diagnosing search engine problems and proposing improvements."
        print("Generating improvement suggestions...")
        suggestions = self.call_llm(prompt, system_prompt, api_token=api_token, model_name=model_name, api_url=api_url)
        return suggestions

    def _get_low_score_examples(self, evaluations: List[Dict], threshold: float = 3.0) -> str:
        """Collect examples of low-scoring results (at most three)."""
        low_score_results = []
        for eval_item in evaluations:
            avg_score = (eval_item['relevance'] + eval_item['accuracy'] + eval_item['quality']) / 3
            if avg_score < threshold:
                result = eval_item['result']
                low_score_results.append(
                    f"  - Rank #{result['rank']}: {result['title'][:50]} (score: {avg_score:.1f}/5)"
                )
        return "\n".join(low_score_results[:3]) if low_score_results else "  (no low-scoring results)"
    def comprehensive_evaluation(self, test_queries: List[str]) -> Dict:
        """
        Run a comprehensive evaluation of the search engine.

        Args:
            test_queries: List of test queries.

        Returns:
            Comprehensive evaluation report.
        """
        print("\n" + "="*60)
        print("Starting comprehensive search engine evaluation")
        print("="*60)
        all_results = []
        for query in test_queries:
            result = self.evaluate_search_results(query, top_k=5)
            all_results.append(result)
        # Generate improvement suggestions.
        suggestions = self.generate_improvement_suggestions(all_results)
        # Assemble the final report.
        report = {
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
            'test_queries': test_queries,
            'evaluation_results': all_results,
            'improvement_suggestions': suggestions,
            'model_used': self.model_name
        }
        print("\n" + "="*60)
        print("Comprehensive evaluation complete")
        print("="*60)
        print("\nImprovement suggestions:")
        print(suggestions)
        print("\n" + "="*60 + "\n")
        return report
def test_llm_evaluator():
    """Test the LLM evaluation system."""
    from ss import SearchEngine
    import os
    import pickle

    # Initialize the search engine.
    print("Initializing search engine...")
    engine = SearchEngine()
    engine.load_index('search_index.pkl')
    engine.load_pagerank('pagerank_results.json')
    if os.path.exists('ltr_model.pkl'):
        with open('ltr_model.pkl', 'rb') as f:
            model_data = pickle.load(f)
        engine.ltr_model = model_data['model']
        engine.scaler = model_data['scaler']

    # Initialize the evaluator.
    print("Initializing LLM evaluator...\n")
    evaluator = LLMEvaluator(engine)

    # Test queries (left in Chinese to match the indexed content).
    test_queries = [
        "中国人民大学",
        "人大新闻学院",
        "研究生招生",
        "图书馆",
        "校园活动"
    ]

    # Run the comprehensive evaluation.
    report = evaluator.comprehensive_evaluation(test_queries)

    # Save the evaluation report.
    with open('llm_evaluation_results.json', 'w', encoding='utf-8') as f:
        json.dump(report, f, ensure_ascii=False, indent=2)
    print("Evaluation report saved to llm_evaluation_results.json")


if __name__ == '__main__':
    test_llm_evaluator()