TinyCl commited on
Commit
60580c8
·
verified ·
1 Parent(s): 192d3c5

Delete main.py

Browse files
Files changed (1) hide show
  1. main.py +0 -477
main.py DELETED
@@ -1,477 +0,0 @@
1
- from fastapi import FastAPI, Request, Depends, HTTPException
2
- from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
3
- from fastapi.responses import StreamingResponse
4
- from fastapi.background import BackgroundTasks
5
- import requests
6
- from curl_cffi import requests as cffi_requests # 保留这个,用于获取cookies
7
- import uuid
8
- import json
9
- import time
10
- from typing import Optional
11
- import asyncio
12
- import base64
13
- import tempfile
14
- import os
15
- import re
16
-
17
app = FastAPI()
security = HTTPBearer()

# OpenAI-style API key; can be overridden via the environment variable.
# None means "skip validation"; otherwise set a concrete value such as "sk-proj-1234567890".
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)

# Process-wide mutable state shared across requests.
global_data = {
    "cookie": None,   # latest upstream cookie string, e.g. "k1=v1; k2=v2"
    "cookies": None,  # NOTE(review): never written by the visible code — possibly vestigial
    "last_update": 0  # epoch seconds of the last successful cookie refresh
}
29
-
30
def get_cookie():
    """Fetch fresh cookies from chat.akash.network and cache them.

    On success, stores the joined cookie string in ``global_data["cookie"]``,
    stamps ``global_data["last_update"]`` and returns the string.
    Returns None when no cookies were received or the request failed.
    """
    try:
        # curl_cffi impersonates a real Chrome build so the site serves cookies.
        resp = cffi_requests.get(
            'https://chat.akash.network/',
            impersonate="chrome110",
            timeout=30
        )

        pairs = resp.cookies.items()
        if not pairs:
            return None

        cookie_str = '; '.join(f'{name}={value}' for name, value in pairs)
        global_data["cookie"] = cookie_str
        global_data["last_update"] = time.time()
        print(f"Got cookies: {cookie_str}")
        return cookie_str

    except Exception as e:
        print(f"Error fetching cookie: {e}")
        return None
51
-
52
async def check_and_update_cookie(background_tasks: BackgroundTasks):
    """Schedule a background cookie refresh once the cached one is stale."""
    stale_after = 1800  # 30 minutes, in seconds
    age = time.time() - global_data["last_update"]
    if age > stale_after:
        background_tasks.add_task(get_cookie)
56
-
57
@app.on_event("startup")
async def startup_event():
    # Warm the cookie cache once at process start; this is a blocking call that
    # runs before the server accepts traffic.
    # NOTE(review): @app.on_event is deprecated in newer FastAPI in favor of
    # lifespan handlers — confirm the installed version before migrating.
    get_cookie()
60
-
61
async def get_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Extract the bearer token and, when OPENAI_API_KEY is configured, validate it.

    Returns the token with any "Bearer " prefix removed. Raises HTTP 401 when a
    configured key does not match.
    """
    raw = credentials.credentials

    # Strip a redundant "Bearer " prefix if the client included one in the
    # credential itself (removes every occurrence, matching the original logic).
    if raw.startswith("Bearer "):
        cleaned = raw.replace("Bearer ", "")
    else:
        cleaned = raw

    # Only validate when a key is configured; OPENAI_API_KEY=None disables auth.
    if OPENAI_API_KEY is not None and cleaned != OPENAI_API_KEY:
        raise HTTPException(
            status_code=401,
            detail="Invalid API key"
        )

    return cleaned
76
-
77
async def check_image_status(session: requests.Session, job_id: str, headers: dict) -> Optional[str]:
    """Poll the Akash image-status endpoint until the job completes, then upload.

    Polls up to ``max_retries`` times (roughly one second apart). On completion
    with a valid result, uploads the image via ``upload_to_xinyew`` and returns
    the hosted URL. Returns None on job failure, invalid result, upload failure,
    or timeout.

    Fix vs. original: a transient exception (network blip, malformed JSON) used
    to abort the whole poll immediately; it now counts as one failed attempt and
    polling continues within the same bounded loop.
    """
    max_retries = 30
    for attempt in range(max_retries):
        try:
            print(f"\nAttempt {attempt + 1}/{max_retries} for job {job_id}")
            response = session.get(
                f'https://chat.akash.network/api/image-status?ids={job_id}',
                headers=headers
            )
            print(f"Status response code: {response.status_code}")
            status_data = response.json()

            if status_data and isinstance(status_data, list) and len(status_data) > 0:
                job_info = status_data[0]
                status = job_info.get('status')
                print(f"Job status: {status}")

                # Only act on a terminal status; anything else means keep waiting.
                if status == "completed":
                    result = job_info.get("result")
                    if result and not result.startswith("Failed"):
                        print("Got valid result, attempting upload...")
                        image_url = await upload_to_xinyew(result, job_id)
                        if image_url:
                            print(f"Successfully uploaded image: {image_url}")
                            return image_url
                        print("Image upload failed")
                        return None
                    print("Invalid result received")
                    return None
                elif status == "failed":
                    print(f"Job {job_id} failed")
                    return None

            # Status is pending (or response shape unexpected): wait and retry.
            await asyncio.sleep(1)

        except Exception as e:
            # Transient error: log it and retry instead of giving up on the job.
            print(f"Error checking status: {e}")
            await asyncio.sleep(1)

    print(f"Timeout waiting for job {job_id}")
    return None
122
-
123
@app.get("/")
async def health_check():
    """Liveness probe: always reports the service as up."""
    return dict(status="ok")
127
-
128
@app.post("/v1/chat/completions")
async def chat_completions(
    request: Request,
    api_key: str = Depends(get_api_key)
):
    """OpenAI-compatible chat endpoint: forwards the request to chat.akash.network
    and re-streams the upstream reply as OpenAI-style SSE chunks.

    The client's bearer token is forwarded upstream as the Akash
    ``session_token`` cookie. On any error a plain JSON dict
    ``{"error": ...}`` is returned instead of an HTTP error status.
    """
    try:
        data = await request.json()
        print(f"Chat request data: {data}")

        # Short random id used for both the upstream chat and the SSE chunk ids.
        chat_id = str(uuid.uuid4()).replace('-', '')[:16]

        # Payload in the shape the Akash chat API expects.
        akash_data = {
            "id": chat_id,
            "messages": data.get('messages', []),
            "model": data.get('model', "DeepSeek-R1"),
            "system": data.get('system_message', "You are a helpful assistant."),
            "temperature": data.get('temperature', 0.6),
            "topP": data.get('top_p', 0.95)
        }

        # Browser-like headers; the API key is passed as the session cookie.
        headers = {
            "Content-Type": "application/json",
            "Cookie": f"session_token={api_key}",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
            "Accept-Encoding": "gzip, deflate, br",
            "Origin": "https://chat.akash.network",
            "Referer": "https://chat.akash.network/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Connection": "keep-alive",
            "Priority": "u=1, i"
        }

        print(f"Sending request to Akash with headers: {headers}")
        print(f"Request data: {akash_data}")

        with requests.Session() as session:
            response = session.post(
                'https://chat.akash.network/api/chat',
                json=akash_data,
                headers=headers,
                stream=True
            )

            def generate():
                # Translate the upstream "type:data" line protocol into SSE chunks.
                content_buffer = ""
                for line in response.iter_lines():
                    if not line:
                        continue

                    try:
                        line_str = line.decode('utf-8')
                        msg_type, msg_data = line_str.split(':', 1)

                        # Type '0' carries a content delta as a JSON-encoded string.
                        if msg_type == '0':
                            # Unquote and unescape the JSON-string payload.
                            if msg_data.startswith('"') and msg_data.endswith('"'):
                                msg_data = msg_data.replace('\\"', '"')
                                msg_data = msg_data[1:-1]
                                msg_data = msg_data.replace("\\n", "\n")

                            # Check the model type before handling the message.
                            if data.get('model') == 'AkashGen' and "<image_generation>" in msg_data:
                                # Special handling for the image-generation model.
                                async def process_and_send():
                                    messages = await process_image_generation(msg_data, session, headers, chat_id)
                                    if messages:
                                        return messages
                                    return None

                                # Create a fresh event loop: generate() runs in a
                                # synchronous context, so the async helper must be
                                # driven to completion here.
                                loop = asyncio.new_event_loop()
                                asyncio.set_event_loop(loop)
                                try:
                                    result_messages = loop.run_until_complete(process_and_send())
                                finally:
                                    loop.close()

                                if result_messages:
                                    for message in result_messages:
                                        yield f"data: {json.dumps(message)}\n\n"
                                continue

                            content_buffer += msg_data

                            chunk = {
                                "id": f"chatcmpl-{chat_id}",
                                "object": "chat.completion.chunk",
                                "created": int(time.time()),
                                "model": data.get('model'),
                                "choices": [{
                                    "delta": {"content": msg_data},
                                    "index": 0,
                                    "finish_reason": None
                                }]
                            }
                            yield f"data: {json.dumps(chunk)}\n\n"

                        # Types 'e'/'d' are end-of-stream markers: emit the final
                        # stop chunk and the OpenAI [DONE] sentinel.
                        elif msg_type in ['e', 'd']:
                            chunk = {
                                "id": f"chatcmpl-{chat_id}",
                                "object": "chat.completion.chunk",
                                "created": int(time.time()),
                                "model": data.get('model'),  # model named in the request
                                "choices": [{
                                    "delta": {},
                                    "index": 0,
                                    "finish_reason": "stop"
                                }]
                            }
                            yield f"data: {json.dumps(chunk)}\n\n"
                            yield "data: [DONE]\n\n"
                            break

                    except Exception as e:
                        # Skip malformed lines rather than killing the stream.
                        print(f"Error processing line: {e}")
                        continue

            return StreamingResponse(
                generate(),
                media_type='text/event-stream',
                headers={
                    'Cache-Control': 'no-cache',
                    'Connection': 'keep-alive',
                    'Content-Type': 'text/event-stream'
                }
            )

    except Exception as e:
        return {"error": str(e)}
260
-
261
@app.get("/v1/models")
async def list_models(api_key: str = Depends(get_api_key)):
    """Proxy Akash's model list, reshaped into the OpenAI /v1/models format."""
    try:
        # Browser-like headers; the API key rides as the session cookie.
        headers = {
            "Content-Type": "application/json",
            "Cookie": f"session_token={api_key}",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
            "Accept-Encoding": "gzip, deflate, br",
            "Origin": "https://chat.akash.network",
            "Referer": "https://chat.akash.network/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Connection": "keep-alive"
        }

        response = requests.get(
            'https://chat.akash.network/api/models',
            headers=headers
        )
        akash_response = response.json()
        print(f"Akash API response: {akash_response}")

        # The upstream payload may be a bare list or a {"models": [...]} dict.
        if isinstance(akash_response, list):
            models_list = akash_response
        elif isinstance(akash_response, dict):
            models_list = akash_response.get("models", [])
        else:
            print(f"Unexpected response format: {type(akash_response)}")
            models_list = []

        def to_openai_entry(model):
            # Entries may be plain strings or dicts carrying an "id" field.
            model_id = model["id"] if isinstance(model, dict) else model
            now = int(time.time())
            return {
                "id": model_id,
                "object": "model",
                "created": now,
                "owned_by": "akash",
                "permission": [{
                    "id": f"modelperm-{model_id}",
                    "object": "model_permission",
                    "created": now,
                    "allow_create_engine": False,
                    "allow_sampling": True,
                    "allow_logprobs": True,
                    "allow_search_indices": False,
                    "allow_view": True,
                    "allow_fine_tuning": False,
                    "organization": "*",
                    "group": None,
                    "is_blocking": False
                }]
            }

        return {
            "object": "list",
            "data": [to_openai_entry(m) for m in models_list]
        }

    except Exception as e:
        print(f"Error in list_models: {e}")
        import traceback
        print(traceback.format_exc())
        return {"error": str(e)}
335
-
336
async def upload_to_xinyew(image_base64: str, job_id: str) -> Optional[str]:
    """Upload a base64-encoded JPEG to the xinyew.cn image host.

    Accepts either a raw base64 string or a data-URI ("data:image/...;base64,...").
    Returns the hosted image URL on success, None on any failure (bad base64,
    upload error, unexpected response).

    Fixes vs. original: the temp file handle is now closed via a ``with`` block
    (it was leaked, which also makes ``os.unlink`` fail on Windows), and the
    filename log line actually includes the filename.
    """
    try:
        print(f"\n=== Starting image upload for job {job_id} ===")
        print(f"Base64 data length: {len(image_base64)}")

        # Strip an optional data-URI prefix before decoding.
        try:
            image_data = base64.b64decode(image_base64.split(',')[1] if ',' in image_base64 else image_base64)
            print(f"Decoded image data length: {len(image_data)} bytes")
        except Exception as e:
            print(f"Error decoding base64: {e}")
            print(f"First 100 chars of base64: {image_base64[:100]}...")
            return None

        # Persist to a temp file; delete=False so it can be reopened for upload.
        with tempfile.NamedTemporaryFile(suffix='.jpeg', delete=False) as temp_file:
            temp_file.write(image_data)
            temp_file_path = temp_file.name

        try:
            filename = f"{job_id}.jpeg"
            print(f"Using filename: {filename}")

            # Open inside a with-block so the handle is always closed.
            with open(temp_file_path, 'rb') as image_file:
                files = {
                    'file': (filename, image_file, 'image/jpeg')
                }

                print("Sending request to xinyew.cn...")
                response = requests.post(
                    'https://api.xinyew.cn/api/jdtc',
                    files=files,
                    timeout=30
                )

            print(f"Upload response status: {response.status_code}")
            if response.status_code == 200:
                result = response.json()
                print(f"Upload response: {result}")

                if result.get('errno') == 0:
                    url = result.get('data', {}).get('url')
                    if url:
                        print(f"Successfully got image URL: {url}")
                        return url
                    print("No URL in response data")
                else:
                    print(f"Upload failed: {result.get('message')}")
            else:
                print(f"Upload failed with status {response.status_code}")
                print(f"Response content: {response.text}")
            return None

        finally:
            # Always remove the temp file, even when the upload raised.
            try:
                os.unlink(temp_file_path)
            except Exception as e:
                print(f"Error removing temp file: {e}")

    except Exception as e:
        print(f"Error in upload_to_xinyew: {e}")
        import traceback
        print(traceback.format_exc())
        return None
402
-
403
async def process_image_generation(msg_data: str, session: requests.Session, headers: dict, chat_id: str) -> Optional[list]:
    """Handle an AkashGen image-generation marker inside the stream.

    Parses jobId/prompt/negative out of ``msg_data``, waits for the image via
    ``check_image_status`` and returns a list of OpenAI-style streaming chunks
    (a "thinking" block followed by either the image markdown or a failure
    note). Returns None when the marker pattern is not present.
    """
    match = re.search(r"jobId='([^']+)' prompt='([^']+)' negative='([^']*)'", msg_data)
    if not match:
        return None

    job_id, prompt, negative = match.groups()
    print(f"Starting image generation process for job_id: {job_id}")

    start_time = time.time()

    # Open the "thinking" block shown while the image is being generated.
    think_msg = "<think>\n"
    think_msg += "🎨 Generating image...\n\n"
    think_msg += f"Prompt: {prompt}\n"

    # Poll for completion and upload; this is where the wall-clock time goes.
    result = await check_image_status(session, job_id, headers)

    elapsed_time = time.time() - start_time
    think_msg += f"\n🤔 Thinking for {elapsed_time:.1f}s...\n"
    think_msg += "</think>"

    def build_chunk(suffix, content):
        # One OpenAI-compatible streaming chunk carrying ``content``.
        return {
            "id": f"chatcmpl-{chat_id}-{suffix}",
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": "AkashGen",
            "choices": [{
                "delta": {"content": content},
                "index": 0,
                "finish_reason": None
            }]
        }

    messages = [build_chunk("think", think_msg)]
    if result:
        messages.append(build_chunk("image", f"\n\n![Generated Image]({result})"))
    else:
        messages.append(build_chunk("fail", "\n\n*Image generation or upload failed.*"))
    return messages
474
-
475
if __name__ == '__main__':
    # Run the API directly with uvicorn on all interfaces, port 9000.
    import uvicorn

    uvicorn.run(app, host='0.0.0.0', port=9000)