Tyb7654 committed on
Commit
4295924
·
verified ·
1 Parent(s): f6bc568

Upload 2 files

Browse files
Files changed (2) hide show
  1. appigence_api.py +432 -0
  2. requirements.txt +6 -0
appigence_api.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# appigence_api.py
import asyncio
import hmac
import json
import os
import time
import uuid
from contextlib import asynccontextmanager
from typing import Any, Dict, List, Optional, AsyncGenerator

import httpx
import uvicorn
from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field
17
+
18
+
19
+ # ========== 数据模型 - 遵循DRY原则 ==========
20
class ChatMessage(BaseModel):
    """A single chat message in OpenAI format (role + content)."""
    role: str
    content: str
24
+
25
+
26
class ChatCompletionRequest(BaseModel):
    """OpenAI-style /v1/chat/completions request body."""
    model: str
    messages: List[ChatMessage]
    stream: bool = False
    # NOTE: temperature/max_tokens are accepted for OpenAI compatibility
    # but are not forwarded upstream (see AppigenceHandler._convert_request).
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None
33
+
34
+
35
class StreamChoice(BaseModel):
    """One choices[] entry of a streaming chunk."""
    delta: Dict[str, Any] = Field(default_factory=dict)
    index: int = 0
    finish_reason: Optional[str] = None
40
+
41
+
42
class StreamResponse(BaseModel):
    """OpenAI-format streaming chunk (object chat.completion.chunk)."""
    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
    object: str = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[StreamChoice]
49
+
50
+
51
class ModelInfo(BaseModel):
    """OpenAI-style model descriptor for the /v1/models listing."""
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "appigence"
57
+
58
+
59
class HealthCheck(BaseModel):
    """Response body for the /health liveness endpoint."""
    status: str
    timestamp: int
    version: str = "1.0.0"
    models_available: List[str]
65
+
66
+
67
+ # ========== 全局HTTP客户端管理 - 遵循KISS原则 ==========
68
class HTTPClientManager:
    """Singleton-style manager for a shared httpx.AsyncClient.

    Lazily creates one pooled client and exposes a semaphore that
    callers acquire to cap concurrent upstream requests.
    """

    def __init__(self):
        self.client: Optional[httpx.AsyncClient] = None
        # Acquired by handlers to limit concurrent upstream requests.
        self.semaphore = asyncio.Semaphore(50)
        # Guards lazy creation: without it, concurrent first calls could
        # each build a client, leaking all but the last connection pool.
        self._client_lock = asyncio.Lock()

    async def get_client(self) -> httpx.AsyncClient:
        """Return the shared HTTP client, creating it on first use."""
        if self.client is None:
            async with self._client_lock:
                if self.client is None:  # re-check after acquiring the lock
                    self.client = httpx.AsyncClient(
                        timeout=httpx.Timeout(300.0),
                        limits=httpx.Limits(
                            max_keepalive_connections=20,
                            max_connections=100,
                        ),
                    )
        return self.client

    async def close(self):
        """Close and drop the shared client (safe to call repeatedly)."""
        if self.client:
            await self.client.aclose()
            self.client = None


# Shared module-level instance used by the handlers and the lifespan hook.
http_manager = HTTPClientManager()
96
+
97
+
98
+ # ========== Appigence处理器 - 遵循单一职责原则 ==========
99
class AppigenceHandler:
    """All interaction logic for the upstream Appigence chat API.

    Converts OpenAI-format chat requests into the Appigence payload,
    consumes the upstream streaming response, and re-emits it as
    OpenAI-style chat.completion chunks (or one aggregated completion).
    """

    def __init__(self):
        # OpenAI model aliases mapped onto upstream model names.
        self.model_mapping = {
            "gpt-4": "gpt-4o",
            "gpt-4-turbo": "gpt-4o",
            "gpt-3.5-turbo": "gpt-4o-mini",
        }
        self.supported_models = [
            "gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo",
            "gpt-4", "gpt-4-turbo"
        ]
        self.api_url = "https://api.appigence.com/chat"
        # Headers mimic the ChatWise iOS client the upstream expects.
        self.headers = {
            "Host": "api.appigence.com",
            "Content-Type": "application/json",
            "Connection": "keep-alive",
            "Accept": "*/*",
            "User-Agent": "ChatWise/1.2.16 CFNetwork/1410.0.3 Darwin/22.6.0",
            "Accept-Language": "zh-CN,zh-Hans;q=0.9",
            "Accept-Encoding": "gzip, deflate, br"
        }

    def get_supported_models(self) -> List[str]:
        """Return the list of model ids this adapter accepts."""
        return self.supported_models

    def get_model_info(self, model_id: str) -> ModelInfo:
        """Build an OpenAI-style model descriptor for *model_id*."""
        return ModelInfo(id=model_id, owned_by="appigence")

    def _convert_request(self, request: ChatCompletionRequest) -> Dict[str, Any]:
        """Translate an OpenAI request into the Appigence payload.

        Only the conversation and model name are forwarded; temperature
        and max_tokens have no upstream equivalent in this payload.
        """
        conversation = []
        for msg in request.messages:
            # Fold "system" into "user" -- presumably the upstream has no
            # system role; TODO confirm against the Appigence API.
            role = "user" if msg.role == "system" else msg.role
            conversation.append({
                "content": msg.content,
                "role": role
            })

        model_name = self.model_mapping.get(request.model, request.model)

        return {
            "isPremium": True,
            "modelName": model_name,
            "userConversation": conversation
        }

    def _parse_sse_line(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse one upstream line as JSON; None for blank/non-JSON lines.

        Accepts both bare JSON lines and standard SSE "data: ..." lines,
        and skips the "[DONE]" sentinel.
        """
        if not line:
            return None
        # Tolerate a standard SSE prefix without requiring it.
        if line.startswith("data:"):
            line = line[5:].strip()
            if not line or line == "[DONE]":
                return None
        try:
            return json.loads(line)
        except json.JSONDecodeError:
            return None

    def _extract_content_delta(self, data: Dict[str, Any]) -> Optional[str]:
        """Pull the incremental content string out of a chunk, if any."""
        try:
            choices = data.get("choices", [])
            if choices:
                delta = choices[0].get("delta", {})
                return delta.get("content", "")
        except (KeyError, IndexError, TypeError):
            return None
        return None

    def _is_finished(self, data: Dict[str, Any]) -> bool:
        """True when the chunk signals finish_reason == "stop"."""
        try:
            choices = data.get("choices", [])
            if choices:
                return choices[0].get("finish_reason") == "stop"
        except (KeyError, IndexError, TypeError):
            return False
        return False

    async def handle_stream_request(
        self,
        request: ChatCompletionRequest
    ) -> AsyncGenerator[str, None]:
        """Yield an OpenAI-format SSE stream for *request*.

        Errors are reported in-band as a JSON {"error": ...} event,
        because the response headers are already sent once streaming
        has begun.
        """
        appigence_request = self._convert_request(request)

        stream_id = f"chatcmpl-{uuid.uuid4().hex}"
        created_time = int(time.time())

        # First chunk announces the assistant role, per OpenAI convention.
        yield f"data: {json.dumps({'id': stream_id, 'object': 'chat.completion.chunk', 'created': created_time, 'model': request.model, 'choices': [{'index': 0, 'delta': {'role': 'assistant'}, 'finish_reason': None}]})}\n\n"

        async with http_manager.semaphore:  # cap concurrent upstream calls
            try:
                client = await http_manager.get_client()
                async with client.stream(
                    "POST",
                    self.api_url,
                    json=appigence_request,
                    headers=self.headers
                ) as response:
                    response.raise_for_status()

                    async for line in response.aiter_lines():
                        data = self._parse_sse_line(line)
                        if not data:
                            continue

                        content_delta = self._extract_content_delta(data)
                        if content_delta:
                            delta_response = StreamResponse(
                                id=stream_id,
                                created=created_time,
                                model=request.model,
                                choices=[StreamChoice(delta={"content": content_delta})]
                            )
                            # model_dump_json() is the pydantic-v2 form;
                            # .json() is deprecated under the pinned 2.x.
                            yield f"data: {delta_response.model_dump_json()}\n\n"

                        if self._is_finished(data):
                            break

                # Terminal chunk plus the [DONE] sentinel.
                finish_response = StreamResponse(
                    id=stream_id,
                    created=created_time,
                    model=request.model,
                    choices=[StreamChoice(delta={}, finish_reason="stop")]
                )
                yield f"data: {finish_response.model_dump_json()}\n\n"
                yield "data: [DONE]\n\n"

            except httpx.HTTPStatusError as e:
                error_msg = f"Appigence API error: {e.response.status_code}"
                yield f"data: {json.dumps({'error': error_msg})}\n\n"
            except Exception as e:
                error_msg = f"Internal error: {str(e)}"
                yield f"data: {json.dumps({'error': error_msg})}\n\n"

    async def handle_non_stream_request(
        self,
        request: ChatCompletionRequest
    ) -> Dict[str, Any]:
        """Aggregate the upstream stream into one OpenAI completion dict.

        Upstream failures (httpx exceptions) propagate to the endpoint,
        which maps them onto HTTP error responses.
        """
        appigence_request = self._convert_request(request)
        content_pieces: List[str] = []

        async with http_manager.semaphore:  # cap concurrent upstream calls
            client = await http_manager.get_client()
            async with client.stream(
                "POST",
                self.api_url,
                json=appigence_request,
                headers=self.headers
            ) as response:
                response.raise_for_status()

                async for line in response.aiter_lines():
                    data = self._parse_sse_line(line)
                    if not data:
                        continue

                    content_delta = self._extract_content_delta(data)
                    if content_delta:
                        content_pieces.append(content_delta)

                    if self._is_finished(data):
                        break

        content = "".join(content_pieces)

        return {
            "id": f"chatcmpl-{uuid.uuid4().hex}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [{
                "message": {"role": "assistant", "content": content},
                "index": 0,
                "finish_reason": "stop"
            }],
            # Upstream provides no token accounting; report zeros.
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }
295
+
296
+
297
+ # ========== 应用生命周期管理 ==========
298
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: no startup work needed (YAGNI).

    The shared HTTP client is released in a finally block so its pooled
    connections are closed even if the application body errors out.
    """
    try:
        yield
    finally:
        await http_manager.close()
305
+
306
+
307
# ========== FastAPI application setup ==========
app = FastAPI(
    title="Appigence OpenAI API Adapter",
    description="高性能Appigence API适配器,支持OpenAI格式调用",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by the CORS spec -- browsers ignore a wildcard origin on
# credentialed requests. Confirm whether credentialed cross-origin access
# is actually needed; if not, drop allow_credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Single shared handler instance used by all endpoints.
handler = AppigenceHandler()

# Optional bearer-token auth; auto_error=False lets requests without a
# token reach get_api_key, which enforces auth only when API_KEY is set.
security = HTTPBearer(auto_error=False)
329
+
330
async def get_api_key(credentials: Optional[HTTPAuthorizationCredentials] = Depends(security)):
    """Optional API-key dependency.

    If the API_KEY environment variable is set, the request must carry a
    matching bearer token; otherwise all requests pass through.

    Raises:
        HTTPException: 401 when a key is required and missing/mismatched.
    """
    required_key = os.getenv("API_KEY")
    if required_key:
        supplied = credentials.credentials if credentials else ""
        # Constant-time comparison avoids leaking key bytes via timing.
        if not hmac.compare_digest(supplied, required_key):
            raise HTTPException(status_code=401, detail="Invalid API key")
    return credentials
338
+
339
+
340
+ # ========== API端点定义 ==========
341
@app.get("/", response_model=Dict[str, str])
async def root():
    """Root endpoint: minimal service metadata (KISS)."""
    info = {
        "message": "Appigence OpenAI API Adapter",
        "version": "1.0.0",
        "docs": "/docs",
    }
    return info
349
+
350
+
351
@app.get("/health", response_model=HealthCheck)
async def health_check():
    """Liveness probe: report status, current time and available models."""
    now = int(time.time())
    return HealthCheck(
        status="healthy",
        timestamp=now,
        models_available=handler.get_supported_models(),
    )
359
+
360
+
361
@app.get("/v1/models")
async def list_models(api_key: Optional[HTTPAuthorizationCredentials] = Depends(get_api_key)):
    """List available models in OpenAI /v1/models format."""
    # model_dump() replaces the pydantic-v1 .dict(), which is deprecated
    # under the pydantic 2.x pinned in requirements.txt.
    models = [
        handler.get_model_info(model_id).model_dump()
        for model_id in handler.get_supported_models()
    ]
    return {"object": "list", "data": models}
369
+
370
+
371
@app.post("/v1/chat/completions")
async def chat_completions(
    request: ChatCompletionRequest,
    background_tasks: BackgroundTasks,
    api_key: Optional[HTTPAuthorizationCredentials] = Depends(get_api_key)
):
    """
    Unified entry point for chat completions.

    Validates messages and model, then dispatches to the streaming or
    non-streaming handler depending on request.stream.

    Raises:
        HTTPException: 400 for invalid input, the backend status code for
            upstream failures, 500 for unexpected errors.
    """
    if not request.messages:
        raise HTTPException(status_code=400, detail="Messages required")

    # Reject models the adapter does not know how to map upstream.
    if request.model not in handler.get_supported_models():
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported model: {request.model}. Supported models: {handler.get_supported_models()}"
        )

    try:
        if request.stream:
            return StreamingResponse(
                handler.handle_stream_request(request),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"  # disable nginx buffering
                }
            )
        else:
            response = await handler.handle_non_stream_request(request)
            return response

    except HTTPException:
        # Re-raise deliberate HTTP errors unchanged; the bare Exception
        # branch below would otherwise remap them into generic 500s.
        raise
    except httpx.HTTPStatusError as e:
        raise HTTPException(
            status_code=e.response.status_code,
            detail=f"Backend API error: {e.response.text}"
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
415
+
416
+
417
+ # ========== 应用启动配置 ==========
418
if __name__ == "__main__":
    # Hugging Face Spaces exposes PORT; fall back to its default 7860.
    server_port = int(os.getenv("PORT", 7860))

    for banner in (
        f"🚀 Starting Appigence API Adapter on port {server_port}",
        f"📚 API Documentation: http://localhost:{server_port}/docs",
        f"❤️ Health Check: http://localhost:{server_port}/health",
    ):
        print(banner)

    uvicorn.run(
        "appigence_api:app",
        host="0.0.0.0",
        port=server_port,
        workers=1,  # single worker in the container; Gunicorn manages processes
        log_level="info",
        access_log=True,
    )
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ fastapi==0.104.1
2
+ uvicorn[standard]==0.24.0
3
+ httpx==0.25.2
4
+ pydantic==2.5.0
5
+ gunicorn==21.2.0
6
+ python-multipart==0.0.6