Hivra commited on
Commit
8d83cb6
·
verified ·
1 Parent(s): bea3b89

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +232 -0
app.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, Request
2
+ from fastapi.responses import JSONResponse, StreamingResponse
3
+ from fastapi.middleware.cors import CORSMiddleware
4
+ import httpx
5
+ import json
6
+ import os
7
+ import time
8
+ import asyncio
9
+
10
# FastAPI app: an OpenAI-compatible HTTP proxy in front of DuckDuckGo's chat API.
app = FastAPI()

# CORS Configuration
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# generally not honored for credentialed browser requests — confirm intent.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Environment Configuration — every upstream URL/header value below can be
# overridden via environment variables; defaults target duckduckgo.com.
STATUS_URL = os.environ.get("STATUS_URL", "https://duckduckgo.com/duckchat/v1/status")  # token (x-vqd-4) endpoint
CHAT_URL = os.environ.get("CHAT_URL", "https://duckduckgo.com/duckchat/v1/chat")  # chat completion endpoint
REFERER = os.environ.get("REFERER", "https://duckduckgo.com/")
ORIGIN = os.environ.get("ORIGIN", "https://duckduckgo.com")
USER_AGENT = os.environ.get("USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36")
COOKIE = os.environ.get("COOKIE", "dcm=3; s=l; bf=1")

# Browser-like headers sent on every upstream request; presumably required so
# DuckDuckGo treats the proxy as a normal browser session — verify upstream.
DEFAULT_HEADERS = {
    "User-Agent": USER_AGENT,
    "Accept": "text/event-stream",
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": REFERER,
    "Content-Type": "application/json",
    "Origin": ORIGIN,
    "Connection": "keep-alive",
    "Cookie": COOKIE,
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "Pragma": "no-cache",
    "TE": "trailers",
}

# Model ids accepted by the /v1/chat/completions endpoint.
SUPPORTED_MODELS = ["o3-mini", "gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Llama-3.3-70B-Instruct-Turbo"]
TIMEOUT = 30.0  # Seconds — upstream chat request timeout
47
+
48
async def get_vqd():
    """Fetch a fresh DuckDuckGo `x-vqd-4` authentication token.

    Hits the status endpoint with the `x-vqd-accept: 1` header and returns
    the token from the response headers.

    Raises:
        HTTPException: mirrors the upstream status code on HTTP errors,
            or 500 for any other failure (including a missing token header).
    """
    probe_headers = dict(DEFAULT_HEADERS)
    probe_headers["x-vqd-accept"] = "1"
    try:
        async with httpx.AsyncClient() as client:
            resp = await client.get(STATUS_URL, headers=probe_headers, timeout=10.0)
            resp.raise_for_status()
            token = resp.headers.get("x-vqd-4")
            if not token:
                # Caught below and surfaced as a 500.
                raise ValueError("Missing x-vqd-4 header in response")
            return token
    except httpx.HTTPStatusError as exc:
        raise HTTPException(status_code=exc.response.status_code, detail=f"VQD fetch failed: {str(exc)}")
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"VQD error: {str(exc)}")
66
+
67
async def duckduckgo_chat_stream(model: str, messages: list):
    """Proxy a chat request to DuckDuckGo as a live SSE stream.

    Fixes over the original implementation:
      * The ``AsyncClient`` was closed (context manager exited) before the
        ``StreamingResponse`` body ran, so the entire upstream reply was
        buffered before the first byte reached the caller. The client now
        stays open for the generator's lifetime and is closed in ``finally``.
      * SSE lines are read with ``aiter_lines``, so a JSON payload split
        across two network chunks is no longer broken in half (the old code
        split raw byte chunks on ``'\\n'``).
      * The ``data: [DONE]`` sentinel is recognized instead of being fed to
        ``json.loads`` (which used to emit a spurious error event).

    Args:
        model: upstream model id (already validated by the caller).
        messages: single-element payload in DuckDuckGo's expected shape.

    Returns:
        StreamingResponse emitting one event per upstream message fragment,
        terminated by a ``data: [DONE]`` event.

    Raises:
        HTTPException: on token-fetch failure or a non-2xx upstream status
            detected before streaming begins.
    """
    try:
        x_vqd_4 = await get_vqd()
        chat_headers = {
            **DEFAULT_HEADERS,
            "x-vqd-4": x_vqd_4,
            "Accept": "text/event-stream",
        }

        # Client lifetime must span the generator below, so no `async with`.
        client = httpx.AsyncClient(timeout=TIMEOUT)
        try:
            upstream_request = client.build_request(
                "POST",
                CHAT_URL,
                headers=chat_headers,
                json={"model": model, "messages": messages},
            )
            response = await client.send(upstream_request, stream=True)
            response.raise_for_status()
        except BaseException:
            await client.aclose()
            raise

        async def event_generator():
            try:
                async for raw_line in response.aiter_lines():
                    line = raw_line.strip()
                    if not line.startswith("data: "):
                        continue
                    payload = line[6:].strip()
                    if payload == "[DONE]":  # upstream end-of-stream sentinel
                        return
                    try:
                        data = json.loads(payload)
                    except json.JSONDecodeError as e:
                        yield f"data: {json.dumps({'error': f'JSON error: {str(e)}'})}\n\n"
                        return
                    if "error" in data:
                        yield f"data: {json.dumps({'error': data['error']})}\n\n"
                        return
                    message = data.get("message", "")
                    if not message:
                        continue
                    yield format_openai_chunk(message, model)
                    await asyncio.sleep(0.001)  # Rate limit
            except Exception as e:
                yield f"data: {json.dumps({'error': f'Stream error: {str(e)}'})}\n\n"
            finally:
                yield "data: [DONE]\n\n"
                await response.aclose()
                await client.aclose()

        return StreamingResponse(event_generator(), media_type="text/event-stream")

    except httpx.HTTPStatusError as e:
        raise HTTPException(status_code=e.response.status_code, detail=f"Chat error: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Chat error: {str(e)}")
119
+
120
async def duckduckgo_chat_non_stream(model: str, messages: list):
    """Send a chat request to DuckDuckGo and return the full reply text.

    Consumes the upstream SSE stream, collecting every ``message`` fragment
    into one concatenated string.

    Fixes over the original implementation:
      * SSE lines are read with ``aiter_lines`` so a JSON payload split
        across two network chunks is reassembled (the old code split raw
        byte chunks on ``'\\n'``, which could cut a payload in half).
      * The ``data: [DONE]`` sentinel is skipped explicitly rather than by
        relying on a JSONDecodeError.
      * A ``message`` key holding ``None`` no longer poisons the join.

    Raises:
        HTTPException: mirrors the upstream status on HTTP errors, 500 otherwise.
    """
    try:
        x_vqd_4 = await get_vqd()
        chat_headers = {
            **DEFAULT_HEADERS,
            "x-vqd-4": x_vqd_4,
        }

        parts = []
        async with httpx.AsyncClient() as client:
            async with client.stream(
                "POST",
                CHAT_URL,
                headers=chat_headers,
                json={"model": model, "messages": messages},
                timeout=TIMEOUT,
            ) as response:
                response.raise_for_status()
                async for raw_line in response.aiter_lines():
                    line = raw_line.strip()
                    if not line.startswith("data: "):
                        continue
                    payload = line[6:].strip()
                    if payload == "[DONE]":  # end-of-stream sentinel, not JSON
                        break
                    try:
                        data = json.loads(payload)
                    except json.JSONDecodeError:
                        continue
                    parts.append(data.get("message") or "")

        return "".join(parts)

    except httpx.HTTPStatusError as e:
        raise HTTPException(status_code=e.response.status_code, detail=f"Chat error: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Chat error: {str(e)}")
156
+
157
def format_openai_chunk(content: str, model: str):
    """Format one delta fragment as an OpenAI-style SSE event.

    Fix: the original returned bare JSON + "\\n\\n". OpenAI-compatible SSE
    requires every event to be prefixed with "data: " — the error events
    emitted elsewhere in this file already carry that prefix, so this also
    restores consistency.

    Args:
        content: the text delta for this chunk.
        model: model id echoed back to the client.

    Returns:
        A complete SSE event string: ``data: {json}\\n\\n``.
    """
    payload = {
        "id": f"chatcmpl-{int(time.time()*1000)}",
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": model,
        "choices": [{
            "delta": {"content": content},
            "index": 0,
            "finish_reason": None
        }]
    }
    return "data: " + json.dumps(payload) + "\n\n"
170
+
171
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    """OpenAI-compatible chat completions endpoint.

    Accepts ``{"model", "messages", "stream"}``, flattens the conversation
    into a single user message (system prompt + role-tagged history + the
    final query) and proxies it to DuckDuckGo, returning either an SSE
    stream or a one-shot JSON completion.

    Fix: the original built the history with ``m != messages[-1]`` — a
    VALUE comparison that also dropped any earlier message that happened to
    equal the last one. Slicing by position (``messages[:-1]``) keeps
    duplicates intact.

    Raises:
        HTTPException: 400 for unknown model or empty messages,
            500 for unexpected server-side failures.
    """
    try:
        data = await request.json()
        model = data.get("model", "o3-mini")
        messages = data.get("messages", [])
        stream = data.get("stream", False)

        # Validation
        if model not in SUPPORTED_MODELS:
            raise HTTPException(400, f"Unsupported model: {model}")
        if not messages:
            raise HTTPException(400, "Empty messages list")

        # The last entry is the current query; everything before it (minus
        # system prompts) becomes role-tagged history.
        system_message = next((m for m in messages if m["role"] == "system"), None)
        history = "\n".join(
            f"{m['role']}: {m['content']}"
            for m in messages[:-1]
            if m["role"] != "system"
        )
        current_query = messages[-1]["content"]

        combined_content = (
            f"{system_message['content']}\n{history}\nUser: {current_query}"
            if system_message
            else f"{history}\nUser: {current_query}"
        )
        payload = [{"role": "user", "content": combined_content}]

        if stream:
            return await duckduckgo_chat_stream(model, payload)

        response_text = await duckduckgo_chat_non_stream(model, payload)
        return JSONResponse({
            "id": f"chatcmpl-{int(time.time()*1000)}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [{
                "message": {"role": "assistant", "content": response_text},
                "finish_reason": "stop",
                "index": 0
            }],
            # Token accounting is not available from the upstream API.
            "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
        })

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(500, f"Server error: {str(e)}")
218
+
219
@app.get("/")
async def health_check():
    """Liveness probe: report service status and the current Unix time."""
    now = int(time.time())
    return {"status": "healthy", "timestamp": now}
222
+
223
@app.exception_handler(HTTPException)
async def http_error_handler(request: Request, exc: HTTPException):
    """Render every HTTPException as a JSON body of the form {"error": detail}."""
    payload = {"error": exc.detail}
    return JSONResponse(status_code=exc.status_code, content=payload)
229
+
230
if __name__ == "__main__":
    import uvicorn

    # Generalization: let the bind port be overridden via the PORT env var,
    # consistent with the other environment-driven settings above; the
    # default (7860) is unchanged.
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))