Delete request_converter.py
request_converter.py +0 -185
request_converter.py
DELETED
@@ -1,185 +0,0 @@
"""
Request converter module.
Handles conversion between the OpenAI format and the Warp format.
"""
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass

from utils import Utils


@dataclass
class OpenAIMessage:
    """OpenAI message format."""
    role: str
    content: str


@dataclass
class OpenAIRequest:
    """OpenAI request format."""
    model: str
    messages: List[OpenAIMessage]
    stream: bool = False
    temperature: float = 1.0
    max_tokens: Optional[int] = None


@dataclass
class OpenAIChoice:
    """OpenAI response choice."""
    index: int
    message: Optional[OpenAIMessage] = None
    delta: Optional[Dict[str, Any]] = None
    finish_reason: Optional[str] = None


@dataclass
class OpenAIResponse:
    """OpenAI response format."""
    id: str
    object: str
    created: int
    model: str
    choices: List[OpenAIChoice]
    usage: Optional[Dict[str, int]] = None

class RequestConverter:
    """Converter from OpenAI requests to Warp requests."""

    @staticmethod
    def parse_openai_request(request_data: dict) -> OpenAIRequest:
        """Parse an OpenAI-format request."""
        messages = []
        for msg in request_data.get("messages", []):
            content = msg.get("content", "")

            # Content may arrive as a list of parts; keep only the text parts
            if isinstance(content, list):
                text_content = ""
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "text":
                        text_content += item.get("text", "")
                content = text_content

            messages.append(OpenAIMessage(
                role=msg.get("role", "user"),
                content=str(content)
            ))

        return OpenAIRequest(
            model=request_data.get("model", "gemini-2.0-flash"),
            messages=messages,
            stream=request_data.get("stream", False),
            temperature=request_data.get("temperature", 1.0),
            max_tokens=request_data.get("max_tokens")
        )

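For illustration only (this snippet was not part of the deleted file): a minimal sketch of calling parse_openai_request with a request whose user content arrives as a list of text parts. The payload values here are made up.

# Illustrative usage sketch; hypothetical payload, not part of request_converter.py.
sample_request = {
    "model": "gemini-2.0-flash",
    "stream": True,
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "text", "text": "Hello, "},
            {"type": "text", "text": "world!"},
        ]},
    ],
}
parsed = RequestConverter.parse_openai_request(sample_request)
# parsed.messages[1].content == "Hello, world!"  (text parts are concatenated)
# parsed.max_tokens is None because the key is absent
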
    @staticmethod
    def process_message_history(messages: List[OpenAIMessage]) -> Tuple[List[Dict], str]:
        """Process the message history; return (list of merged message groups, current message)."""
        if not messages:
            return [], "Hello"

        # The final message must come from the user
        if messages[-1].role != "user":
            raise ValueError("The last message must be a user message")

        # Current message (the final one)
        current_message = messages[-1].content

        if len(messages) == 1:
            return [], current_message

        # Walk every message except the last, merging consecutive messages with the same role
        merged_groups = []
        i = 0

        while i < len(messages) - 1:  # exclude the final message
            current_role = messages[i].role

            # Treat system messages as user messages
            if current_role == "system":
                current_role = "user"

            # Collect the current content
            current_content = messages[i].content

            # Merge consecutive messages with the same role
            j = i + 1
            while j < len(messages) - 1:  # exclude the final message
                next_role = messages[j].role
                if next_role == "system":
                    next_role = "user"

                if next_role == current_role:
                    # Merge the content
                    current_content += "\n" + messages[j].content
                    j += 1
                else:
                    break

            # Append the merged message group
            merged_groups.append({"role": current_role, "content": current_content})
            i = j

        return merged_groups, current_message

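Again for illustration (not part of the file): with a hypothetical history in which a system message is followed by a user message, the two are merged into a single user group, and the final user message is returned separately.

# Illustrative usage sketch; hypothetical history, not part of request_converter.py.
history = [
    OpenAIMessage(role="system", content="Be concise."),
    OpenAIMessage(role="user", content="Hi there."),
    OpenAIMessage(role="assistant", content="Hello!"),
    OpenAIMessage(role="user", content="What's the weather?"),
]
groups, current = RequestConverter.process_message_history(history)
# groups  == [{"role": "user", "content": "Be concise.\nHi there."},
#             {"role": "assistant", "content": "Hello!"}]
# current == "What's the weather?"
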
    @staticmethod
    def create_openai_response(content: str, model: str, request_id: str,
                               is_stream: bool = False, finish_reason: Optional[str] = None) -> dict:
        """Create an OpenAI-format response."""
        if is_stream:
            choice = {
                "index": 0,
                "delta": {"content": content} if content else {},
                "finish_reason": finish_reason
            }

            return {
                "id": request_id,
                "object": "chat.completion.chunk",
                "created": Utils.get_current_timestamp(),
                "model": model,
                "choices": [choice]
            }
        else:
            choice = {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": content
                },
                "finish_reason": "stop"
            }

            return {
                "id": request_id,
                "object": "chat.completion",
                "created": Utils.get_current_timestamp(),
                "model": model,
                "choices": [choice],
                "usage": {
                    # prompt_tokens is a fixed placeholder; only completion_tokens is
                    # estimated from the content (whitespace-separated word count)
                    "prompt_tokens": 100,
                    "completion_tokens": len(content.split()) if content else 0,
                    "total_tokens": 100 + (len(content.split()) if content else 0)
                }
            }

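For illustration (not part of the file): the same helper yields a chat.completion.chunk when is_stream=True and a chat.completion with a usage block otherwise. The request id and content values below are made up.

# Illustrative usage sketch; not part of request_converter.py.
chunk = RequestConverter.create_openai_response("Hi", "gemini-2.0-flash", "req-123", is_stream=True)
# chunk["object"] == "chat.completion.chunk"; chunk["choices"][0]["delta"] == {"content": "Hi"}

done = RequestConverter.create_openai_response("", "gemini-2.0-flash", "req-123",
                                               is_stream=True, finish_reason="stop")
# done["choices"][0] == {"index": 0, "delta": {}, "finish_reason": "stop"}

full = RequestConverter.create_openai_response("Hi there", "gemini-2.0-flash", "req-123")
# full["object"] == "chat.completion"; full["usage"]["completion_tokens"] == 2
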
    @staticmethod
    def create_error_response(error_message: str, model: str, request_id: Optional[str] = None) -> dict:
        """Create an error response."""
        if not request_id:
            request_id = Utils.generate_request_id()

        return {
            "id": request_id,
            "object": "error",
            "created": Utils.get_current_timestamp(),
            "model": model,
            "error": {
                "message": error_message,
                "type": "invalid_request_error"
            }
        }