Ganesh Chintalapati committed on
Commit
3bc4707
·
1 Parent(s): 9c644da

modular code apsssjjjadfdr

Browse files
Files changed (1) hide show
  1. api.py +227 -46
api.py CHANGED
@@ -1,48 +1,229 @@
1
- import asyncio
2
  import httpx
3
- from openai import AsyncOpenAI
4
- from anthropic import AsyncAnthropic
5
- from google.generativeai import GenerativeModel
6
-
7
- async def ask_openai(query, history):
8
- client = AsyncOpenAI(api_key="your_openai_api_key")
9
- messages = [{"role": "user", "content": query}]
10
- for h in history:
11
- messages.append({"role": "user", "content": h.get("user", "")})
12
- if h.get("openai"):
13
- messages.append({"role": "assistant", "content": h["openai"]})
14
- stream = await client.chat.completions.create(
15
- model="gpt-4o-mini", messages=messages, stream=True
16
- )
17
- async for chunk in stream:
18
- if chunk.choices[0].delta.content:
19
- yield chunk.choices[0].delta.content
20
-
21
- async def ask_anthropic(query, history):
22
- client = AsyncAnthropic(api_key="your_anthropic_api_key")
23
- messages = [{"role": "user", "content": query}]
24
- for h in history:
25
- messages.append({"role": "user", "content": h.get("user", "")})
26
- if h.get("anthropic"):
27
- messages.append({"role": "assistant", "content": h["anthropic"]})
28
- stream = await client.messages.create(
29
- model="claude-3-5-sonnet-20241022", max_tokens=1024, messages=messages, stream=True
30
- )
31
- async for chunk in stream:
32
- if chunk.content and chunk.content[0].text:
33
- yield chunk.content[0].text
34
-
35
- async def ask_gemini(query, history):
36
- client = GenerativeModel("gemini-1.5-flash", api_key="your_gemini_api_key")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  history_text = ""
38
- for h in history:
39
- user_msg = h.get("user", "")
40
- if user_msg:
41
- history_text += f"User: {user_msg}\n"
42
- if h.get("gemini"):
43
- history_text += f"Assistant: {h['gemini']}\n"
44
- full_prompt = history_text + f"User: {query}\n"
45
- stream = client.generate_content(full_prompt, stream=True)
46
- async for chunk in stream:
47
- if chunk.text:
48
- yield chunk.text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  import httpx
3
+ import json
4
+ import traceback
5
+ from typing import AsyncGenerator, List, Dict
6
+ from config import logger
7
+
8
async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
    """Stream a chat completion from the OpenAI API.

    Args:
        query: The current user message.
        history: Prior turns; each entry holds a "user" message and an
            optional "bot" reply (assumed schema -- confirm with caller).

    Yields:
        Response text fragments as they arrive, or a human-readable
        "Error: ..." string on failure.
    """
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        logger.error("OpenAI API key not provided")
        yield "Error: OpenAI API key not provided."
        return

    # Rebuild the conversation as alternating user/assistant messages.
    messages = []
    for msg in history:
        messages.append({"role": "user", "content": msg["user"]})
        if msg["bot"]:
            messages.append({"role": "assistant", "content": msg["bot"]})
    messages.append({"role": "user", "content": query})

    headers = {
        "Authorization": f"Bearer {openai_api_key}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages,
        "stream": True,
    }

    try:
        # 30s timeout for consistency with the other providers; httpx's
        # default 5s is too tight for a slow first token.
        async with httpx.AsyncClient(timeout=30.0) as client:
            async with client.stream(
                "POST",
                "https://api.openai.com/v1/chat/completions",
                headers=headers,
                json=payload,
            ) as response:
                if response.status_code != 200:
                    # Read the error body while the stream is still open;
                    # after the context manager exits, aread() would raise
                    # httpx.StreamClosed and mask the real error.
                    body = await response.aread()
                    logger.error(f"OpenAI HTTP Status Error: {response.status_code}, {body}")
                    yield f"Error: OpenAI HTTP Status Error: {response.status_code}, {body.decode('utf-8')}"
                    return
                # SSE events are newline-delimited, but a network chunk may
                # end mid-line, so keep the unterminated tail in the buffer.
                buffer = ""
                async for chunk in response.aiter_text():
                    if not chunk:
                        continue
                    buffer += chunk
                    while "\n" in buffer:
                        line, buffer = buffer.split("\n", 1)
                        if not line.startswith("data: "):
                            continue
                        data = line[6:]
                        if data.strip() == "[DONE]":
                            # End of stream: stop the generator entirely
                            # (a bare `break` would only exit this while).
                            return
                        if not data.strip():
                            continue
                        try:
                            json_data = json.loads(data)
                            choices = json_data.get("choices")
                            if choices:
                                delta = choices[0].get("delta", {})
                                content = delta.get("content")
                                if content is not None:
                                    logger.info(f"OpenAI yielding chunk: {content}")
                                    yield content
                        except json.JSONDecodeError as e:
                            logger.error(f"Error parsing OpenAI stream chunk: {str(e)} - Data: {data}")
                            yield f"Error parsing stream: {str(e)}"
                        except Exception as e:
                            logger.error(f"Unexpected error in OpenAI stream: {str(e)} - Data: {data}")
                            yield f"Error in stream: {str(e)}"
    except Exception as e:
        logger.error(f"OpenAI Error: {str(e)}")
        yield f"Error: OpenAI Error: {str(e)}"
72
async def ask_anthropic(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
    """Stream a response from the Anthropic Messages API.

    Args:
        query: The current user message.
        history: Prior turns; each entry holds a "user" message and an
            optional "bot" reply (assumed schema -- confirm with caller).

    Yields:
        Response text fragments as they arrive, or a human-readable
        "Error: ..." string on failure.
    """
    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
    if not anthropic_api_key:
        logger.error("Anthropic API key not provided")
        yield "Error: Anthropic API key not provided."
        return

    # Rebuild the conversation as alternating user/assistant messages.
    messages = []
    for msg in history:
        messages.append({"role": "user", "content": msg["user"]})
        if msg["bot"]:
            messages.append({"role": "assistant", "content": msg["bot"]})
    messages.append({"role": "user", "content": query})

    headers = {
        "x-api-key": anthropic_api_key,
        "anthropic-version": "2023-06-01",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "claude-3-5-sonnet-20241022",
        "max_tokens": 1024,
        "messages": messages,
        "stream": True,
    }

    try:
        async with httpx.AsyncClient(timeout=30.0) as client:
            logger.info(f"Sending Anthropic streaming request: {payload}")
            async with client.stream(
                "POST",
                "https://api.anthropic.com/v1/messages",
                headers=headers,
                json=payload,
            ) as response:
                if response.status_code != 200:
                    # Read the error body while the stream is still open;
                    # after the context manager exits, aread() would raise
                    # httpx.StreamClosed and mask the real error.
                    body = await response.aread()
                    logger.error(f"Anthropic HTTP Status Error: {response.status_code}, {body.decode('utf-8')}")
                    yield f"Error: Anthropic HTTP Status Error: {response.status_code}, {body.decode('utf-8')}"
                    return
                # SSE events are newline-delimited; a network chunk may end
                # mid-line, so keep the unterminated tail in the buffer.
                buffer = ""
                async for chunk in response.aiter_text():
                    if not chunk:
                        continue
                    buffer += chunk
                    while "\n" in buffer:
                        line, buffer = buffer.split("\n", 1)
                        if not line.startswith("data: "):
                            continue
                        data = line[6:]
                        if data.strip() == "[DONE]":
                            # Defensive: Anthropic signals completion with a
                            # "message_stop" event rather than [DONE], but
                            # stop cleanly if we ever see it.
                            return
                        if not data.strip():
                            continue
                        try:
                            json_data = json.loads(data)
                            event_type = json_data.get("type")
                            if event_type == "content_block_delta":
                                text = json_data.get("delta", {}).get("text")
                                if text:
                                    logger.info(f"Anthropic yielding chunk: {text}")
                                    yield text
                            # Other event types (message_start, message_delta,
                            # ping, message_stop, ...) carry no text to emit.
                        except json.JSONDecodeError as e:
                            logger.error(f"Error parsing Anthropic stream chunk: {str(e)} - Data: {data}")
                            yield f"Error parsing stream: {str(e)}"
                        except Exception as e:
                            logger.error(f"Unexpected error in Anthropic stream: {str(e)} - Data: {data}")
                            yield f"Error in stream: {str(e)}"
    except Exception as e:
        logger.error(f"Anthropic Error: {str(e)}\nStack trace: {traceback.format_exc()}")
        yield f"Error: Anthropic Error: {str(e)}"
138
async def ask_gemini(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
    """Stream a response from the Gemini streamGenerateContent API.

    Args:
        query: The current user message.
        history: Prior turns; each entry holds a "user" message and an
            optional "bot" reply (assumed schema -- confirm with caller).

    Yields:
        Response text fragments as they arrive, or a human-readable
        "Error: ..." string on failure.
    """
    gemini_api_key = os.getenv("GEMINI_API_KEY")
    if not gemini_api_key:
        logger.error("Gemini API key not provided")
        yield "Error: Gemini API key not provided."
        return

    # Gemini takes a single prompt, so flatten the history into
    # "User: ... / Assistant: ..." transcript lines.
    history_text = ""
    for msg in history:
        if msg["bot"]:
            history_text += f"User: {msg['user']}\nAssistant: {msg['bot']}\n"
        else:
            history_text += f"User: {msg['user']}\n"
    full_query = history_text + f"User: {query}\n"

    headers = {
        "Content-Type": "application/json"
    }
    payload = {
        "contents": [{"parts": [{"text": full_query}]}]
    }

    def _extract_text(obj) -> str:
        """Return the text of the first candidate part of one response object, or ''."""
        if isinstance(obj, dict) and obj.get("candidates"):
            content = obj["candidates"][0].get("content", {})
            parts = content.get("parts")
            if parts:
                return parts[0].get("text", "")
        return ""

    try:
        async with httpx.AsyncClient(timeout=30.0) as client:
            logger.info(f"Sending Gemini streaming request: {payload}")
            async with client.stream(
                "POST",
                f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key={gemini_api_key}",
                headers=headers,
                json=payload,
            ) as response:
                if response.status_code != 200:
                    # Read the error body while the stream is still open;
                    # after the context manager exits, aread() would raise
                    # httpx.StreamClosed and mask the real error.
                    body = await response.aread()
                    logger.error(f"Gemini HTTP Status Error: {response.status_code}, {body.decode('utf-8')}")
                    yield f"Error: Gemini HTTP Status Error: {response.status_code}, {body.decode('utf-8')}"
                    return
                # This endpoint streams one JSON *array* of response objects
                # ("[{...},\n{...}]"). Parse objects incrementally with
                # raw_decode, which -- unlike brace counting -- is safe
                # against '{'/'}' characters inside string values and lets
                # us strip the array/element delimiters between objects.
                decoder = json.JSONDecoder()
                buffer = ""
                async for chunk in response.aiter_text():
                    if not chunk:
                        continue
                    buffer += chunk
                    logger.info(f"Gemini stream chunk: {chunk}")
                    while True:
                        # Drop whitespace and the '[' / ',' / ']' delimiters
                        # that precede the next object in the array.
                        buffer = buffer.lstrip(" \t\r\n,[]")
                        if not buffer.startswith("{"):
                            break
                        try:
                            obj, consumed = decoder.raw_decode(buffer)
                        except json.JSONDecodeError:
                            # Object not fully received yet; wait for more data.
                            break
                        buffer = buffer[consumed:]
                        logger.info(f"Parsed Gemini JSON: {obj}")
                        text = _extract_text(obj)
                        if text:
                            logger.info(f"Gemini yielding chunk: {text}")
                            yield text
    except Exception as e:
        logger.error(f"Gemini Error: {str(e)}\nStack trace: {traceback.format_exc()}")
        yield f"Error: Gemini Error: {str(e)}"