"""
Pydantic 模型定义 - OpenAI API 兼容格式
"""
from typing import Optional, List, Dict, Any, Literal
from pydantic import BaseModel, Field
import time
# ============ OpenAI 请求模型 ============
class ChatMessage(BaseModel):
    """A single chat message."""
    role: Literal["system", "user", "assistant"]
    content: str


class ChatCompletionRequest(BaseModel):
    """Chat completion request - OpenAI format."""
    model: str = "llama3.1-8B"
    messages: List[ChatMessage]
    temperature: Optional[float] = 1.0
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stream: Optional[bool] = False
    stop: Optional[List[str]] = None
    max_tokens: Optional[int] = None
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    user: Optional[str] = None
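

# Illustrative request payload (not part of the source file): a JSON body like the
# one below would validate against ChatCompletionRequest above. The field values
# are hypothetical examples, chosen only to show the expected shape.
#
#   {
#       "model": "llama3.1-8B",
#       "messages": [
#           {"role": "system", "content": "You are a helpful assistant."},
#           {"role": "user", "content": "Hello!"}
#       ],
#       "temperature": 0.7,
#       "stream": false
#   }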


# ============ OpenAI response models ============
class ChatCompletionChoice(BaseModel):
    """A single chat completion choice."""
    index: int = 0
    message: ChatMessage
    finish_reason: Optional[str] = "stop"


class Usage(BaseModel):
    """Token usage counts."""
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0


class ChatCompletionResponse(BaseModel):
    """Chat completion response - OpenAI format."""
    id: str = Field(default_factory=lambda: f"chatcmpl-{int(time.time())}")
    object: str = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "llama3.1-8B"
    choices: List[ChatCompletionChoice]
    usage: Optional[Usage] = None


# ============ Streaming response models ============
class DeltaMessage(BaseModel):
    """Incremental message (used in streaming responses)."""
    role: Optional[str] = None
    content: Optional[str] = None


class StreamChoice(BaseModel):
    """A single streaming choice."""
    index: int = 0
    delta: DeltaMessage
    finish_reason: Optional[str] = None


class ChatCompletionChunk(BaseModel):
    """Chat completion chunk (streaming response)."""
    id: str = Field(default_factory=lambda: f"chatcmpl-{int(time.time())}")
    object: str = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "llama3.1-8B"
    choices: List[StreamChoice]


# ============ Model list response ============
class ModelInfo(BaseModel):
    """Model information."""
    id: str
    object: str = "model"
    created: int
    owned_by: str


class ModelListResponse(BaseModel):
    """Model list response."""
    object: str = "list"
    data: List[ModelInfo]


# ============ ChatJimmy request models ============
class ChatOptions(BaseModel):
    """ChatJimmy chat options."""
    selectedModel: str = "llama3.1-8B"
    systemPrompt: str = ""
    topK: int = 8
    temperature: Optional[float] = None
    topP: Optional[float] = None
    maxTokens: Optional[int] = None


class ChatJimmyRequest(BaseModel):
    """ChatJimmy API request format."""
    messages: List[Dict[str, str]]
    chatOptions: ChatOptions
    attachment: Optional[Any] = None
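

# A minimal usage sketch (not part of the source file): it constructs a request
# and a matching response purely to show how the models above compose. The
# message text and token counts are hypothetical, and serialization assumes
# Pydantic v2 (`model_dump()`); substitute `.dict()` if the project pins v1.
if __name__ == "__main__":
    request = ChatCompletionRequest(
        messages=[
            ChatMessage(role="system", content="You are a helpful assistant."),
            ChatMessage(role="user", content="Hello!"),
        ],
        temperature=0.7,
    )
    response = ChatCompletionResponse(
        model=request.model,
        choices=[
            ChatCompletionChoice(
                message=ChatMessage(role="assistant", content="Hi! How can I help?")
            )
        ],
        usage=Usage(prompt_tokens=12, completion_tokens=7, total_tokens=19),
    )
    print(request.model_dump())
    print(response.model_dump())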