huijio committed on
Commit
9af3cdd
·
verified ·
1 Parent(s): 0bf5734

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +186 -343
app.py CHANGED
@@ -1,372 +1,215 @@
1
  import gradio as gr
2
  import requests
3
  from html import escape
4
- import re
5
- from typing import List, Tuple
6
- import time
7
 
8
- # API endpoints
9
- API_URL = "https://aham2api-3.onrender.com/v1/chat/completions"
10
- MODELS_URL = "https://aham2api-3.onrender.com/v1/models"
11
-
12
- # Enhanced CSS with modern UI
13
- css = """
14
- :root {
15
- --primary: #4f46e5;
16
- --primary-hover: #4338ca;
17
- --background: #ffffff;
18
- --text: #111827;
19
- --user-bg: #f0f7ff;
20
- --bot-bg: #f9fafb;
21
- --border: #e5e7eb;
22
- --shadow: 0 1px 3px rgba(0,0,0,0.1);
23
- --radius: 12px;
24
- --input-bg: #f9fafb;
25
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
- .dark {
28
- --primary: #6366f1;
29
- --primary-hover: #818cf8;
30
- --background: #1a1a1a;
31
- --text: #f3f4f6;
32
- --user-bg: #2b3d4f;
33
- --bot-bg: #2d2d2d;
34
- --border: #404040;
35
- --input-bg: #2d2d2d;
36
- }
 
 
 
 
 
37
 
38
- .gradio-container {
39
- background: var(--background) !important;
40
- color: var(--text) !important;
41
- max-width: 900px !important;
42
- margin: 0 auto;
43
- font-family: 'Inter', system-ui, sans-serif;
44
- }
 
 
 
 
 
 
 
45
 
46
- .chat-container {
47
- height: 80vh;
48
- border: 1px solid var(--border);
49
- border-radius: var(--radius);
50
- overflow: hidden;
51
- box-shadow: var(--shadow);
52
- display: flex;
53
- flex-direction: column;
54
- }
55
 
56
- .chatbot {
57
- height: 100%;
58
- padding: 0 !important;
59
- background: transparent !important;
60
- display: flex;
61
- flex-direction: column;
62
- overflow-y: auto;
63
- }
64
 
65
- .chatbot .wrap {
66
- padding: 20px;
67
- flex-grow: 1;
68
- overflow-y: auto;
69
- }
70
 
71
- .message {
72
- padding: 12px 16px;
73
- border-radius: var(--radius);
74
- margin: 8px 0;
75
- max-width: 85%;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  line-height: 1.6;
77
- font-size: 15px;
78
- transition: all 0.3s ease;
79
- box-shadow: var(--shadow);
80
- }
81
-
82
- .user-message {
83
- background: var(--user-bg);
84
- margin-left: auto;
85
- border-bottom-right-radius: 4px;
86
- color: var(--text);
87
- }
88
-
89
- .bot-message {
90
- background: var(--bot-bg);
91
- margin-right: auto;
92
- border-bottom-left-radius: 4px;
93
- color: var(--text);
94
- }
95
-
96
- .input-container {
97
- padding: 16px;
98
- background: var(--background);
99
- border-top: 1px solid var(--border);
100
- display: flex;
101
- gap: 8px;
102
- }
103
-
104
- .textbox {
105
- background: var(--input-bg) !important;
106
- border-radius: var(--radius) !important;
107
- border: 1px solid var(--border) !important;
108
- padding: 12px 16px !important;
109
- }
110
-
111
- .textbox:focus {
112
- border-color: var(--primary) !important;
113
- box-shadow: 0 0 0 2px rgba(79, 70, 229, 0.2) !important;
114
- }
115
-
116
- .send-btn {
117
- background: var(--primary) !important;
118
- color: white !important;
119
- border-radius: var(--radius) !important;
120
- border: none !important;
121
- padding: 0 24px !important;
122
  }
123
 
124
- .send-btn:hover {
125
- background: var(--primary-hover) !important;
126
  }
127
 
128
- .dark-toggle {
129
- position: absolute;
130
- right: 20px;
131
- top: 20px;
132
- z-index: 1000;
133
- background: transparent !important;
134
- border: none !important;
135
- box-shadow: none !important;
136
- color: var(--text) !important;
137
- font-size: 20px !important;
138
  }
139
 
140
- .model-selector {
141
- padding: 0 16px 16px 16px;
142
- background: var(--background);
 
143
  }
144
-
145
- .controls {
146
- display: flex;
147
- justify-content: space-between;
148
- align-items: center;
149
- padding: 12px 16px;
150
- border-bottom: 1px solid var(--border);
151
- background: var(--background);
152
- }
153
-
154
- .typing-indicator {
155
- display: inline-flex;
156
- gap: 4px;
157
- align-items: center;
158
- color: #6b7280;
159
- font-size: 14px;
160
- }
161
-
162
- .typing-dot {
163
- width: 8px;
164
- height: 8px;
165
- background: #9ca3af;
166
- border-radius: 50%;
167
- animation: typing 1.4s infinite ease-in-out;
168
- }
169
-
170
- .typing-dot:nth-child(2) {
171
- animation-delay: 0.2s;
172
- }
173
-
174
- .typing-dot:nth-child(3) {
175
- animation-delay: 0.4s;
176
- }
177
-
178
- @keyframes typing {
179
- 0%, 60%, 100% { transform: translateY(0); }
180
- 30% { transform: translateY(-4px); }
181
- }
182
-
183
- .katex { font-size: 1.1em !important; }
184
  """
185
 
186
- def get_available_models() -> List[str]:
187
- try:
188
- response = requests.get(MODELS_URL, timeout=10)
189
- if response.status_code == 200:
190
- return sorted([m['id'] for m in response.json().get('data', [])])
191
- return ["samura-gpt-4o", "samura-claude-3-5-sonnet"]
192
- except Exception as e:
193
- print(f"Error fetching models: {e}")
194
- return ["samura-gpt-4o", "samura-claude-3-5-sonnet"]
195
-
196
- def chat_completion(messages: List[dict], model: str) -> dict:
197
- headers = {"Content-Type": "application/json"}
198
- data = {
199
- "model": model,
200
- "messages": messages,
201
- "temperature": 0.7,
202
- "max_tokens": 2500
203
- }
204
- try:
205
- response = requests.post(API_URL, headers=headers, json=data, timeout=40)
206
- response.raise_for_status()
207
- return response.json()
208
- except requests.exceptions.RequestException as e:
209
- return {"error": f"API request failed: {str(e)}"}
210
- except Exception as e:
211
- return {"error": f"Unexpected error: {str(e)}"}
212
-
213
- def format_message(text: str) -> str:
214
- if not text:
215
- return ""
216
- text = escape(text)
217
- # Handle block LaTeX ($$...$$)
218
- text = re.sub(r'\$\$(.*?)\$\$', r'<div class="katex-block">$$\1$$</div>', text, flags=re.DOTALL)
219
- # Handle inline LaTeX ($...$)
220
- text = re.sub(r'\$(.*?)\$', r'<span class="katex-inline">$\1$</span>', text)
221
- # Handle specific fraction case
222
- text = text.replace(r'\frac{1}{f} = \frac{1}{v} + \frac{1}{u}',
223
- r'<div class="katex-block">\frac{1}{f} = \frac{1}{v} + \frac{1}{u}</div>')
224
- return text.replace("\n", "<br>")
225
-
226
- def create_typing_indicator():
227
- return """
228
- <div class="typing-indicator">
229
- <div class="typing-dot"></div>
230
- <div class="typing-dot"></div>
231
- <div class="typing-dot"></div>
232
- </div>
233
- """
234
-
235
- def respond(message: str, chat_history: List[Tuple[str, str]], model: str):
236
- if not message.strip():
237
- yield chat_history, ""
238
- return
239
-
240
- # Add user message
241
- chat_history.append((format_message(message), None))
242
- yield chat_history, ""
243
 
244
- # Prepare API messages
245
- messages = []
246
- for user_msg, bot_msg in chat_history[:-1]:
247
- if user_msg:
248
- messages.append({"role": "user", "content": re.sub('<[^<]+?>', '', user_msg)})
249
- if bot_msg:
250
- messages.append({"role": "assistant", "content": re.sub('<[^<]+?>', '', bot_msg)})
 
251
 
252
- # Add typing indicator
253
- chat_history[-1] = (chat_history[-1][0], create_typing_indicator())
254
- yield chat_history, ""
 
 
 
 
 
 
 
 
255
 
256
- # Get API response
257
- response = chat_completion(messages + [{"role": "user", "content": message}], model)
 
 
 
258
 
259
- if "error" in response:
260
- bot_message = f"⚠️ Error: {response['error']}"
261
- else:
262
- bot_message = response.get("choices", [{}])[0].get("message", {}).get("content", "No response")
263
 
264
- # Update chat with final response
265
- chat_history[-1] = (chat_history[-1][0], format_message(bot_message))
266
- yield chat_history, ""
267
-
268
- def toggle_dark_mode():
269
- return """
270
- <script>
271
- (function() {
272
- const container = document.querySelector('.gradio-container');
273
- if (container) {
274
- container.classList.toggle('dark');
275
- localStorage.setItem('gradioDarkMode', container.classList.contains('dark'));
276
- }
277
- })();
278
- </script>
279
- """
280
-
281
- def load_dark_mode():
282
- return """
283
- <script>
284
- (function() {
285
- if (localStorage.getItem('gradioDarkMode') === 'true') {
286
- document.querySelector('.gradio-container').classList.add('dark');
287
- }
288
- })();
289
- </script>
290
- """
291
-
292
- def add_math_examples():
293
- examples = [
294
- ["Explain the formula $E = mc^2$"],
295
- ["Derive the lens formula $\frac{1}{f} = \frac{1}{v} + \frac{1}{u}$"],
296
- ["Solve the quadratic equation $ax^2 + bx + c = 0$"],
297
- ["What is the Pythagorean theorem?"],
298
- ["Explain the concept of derivatives in calculus"],
299
- ["How do I solve systems of linear equations?"]
300
- ]
301
- return examples
302
-
303
- with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
304
- # Add initial dark mode script
305
- app.load(None, None, None, _js=load_dark_mode())
306
-
307
- # Header with dark mode toggle
308
- with gr.Column():
309
- gr.Markdown("""
310
- <div style="display: flex; justify-content: space-between; align-items: center;">
311
- <h1 style="margin: 0;">Math Assistant 🤖</h1>
312
- <button onclick="document.querySelector('.dark-toggle').click()" style="background: none; border: none; cursor: pointer; font-size: 20px;">🌓</button>
313
- </div>
314
- <p style="margin-top: 8px; color: var(--text); opacity: 0.8;">Ask any math question and get detailed explanations</p>
315
- """)
316
-
317
- # Hidden dark mode toggle for JS
318
- dark_toggle = gr.Button("🌓", elem_classes="dark-toggle", visible=False)
319
- dark_toggle.click(None, _js=toggle_dark_mode())
320
 
321
- with gr.Column(elem_classes="chat-container"):
322
- # Controls
323
- with gr.Column(elem_classes="controls"):
324
- clear_btn = gr.Button("Clear Chat", size="sm")
325
- model_dropdown = gr.Dropdown(
326
- choices=get_available_models(),
327
- value="samura-gpt-4o",
328
- label="Model",
329
- interactive=True,
330
- scale=2
331
- )
332
-
333
- # Chat area
334
- chatbot = gr.Chatbot(
335
- elem_id="chatbot",
336
- bubble_full_width=False,
337
- avatar_images=(
338
- "https://i.imgur.com/hp3JdUq.png",
339
- "https://i.imgur.com/7WqB2Dn.png"
340
- ),
341
- show_label=False,
342
- height="100%"
343
- )
344
-
345
- # Input area
346
- with gr.Row(elem_classes="input-container"):
347
- msg = gr.Textbox(
348
- placeholder="Type your math question here...",
349
- lines=2,
350
- max_lines=5,
351
- show_label=False,
352
- container=False,
353
- elem_classes="textbox",
354
- autofocus=True
355
- )
356
- submit_btn = gr.Button("Send", elem_classes="send-btn")
357
-
358
- # Examples
359
- gr.Examples(
360
- examples=add_math_examples(),
361
- inputs=msg,
362
- label="Try these examples:",
363
- examples_per_page=3
364
- )
365
 
366
- # Event handlers
367
- msg.submit(respond, [msg, chatbot, model_dropdown], [chatbot, msg], queue=True)
368
- submit_btn.click(respond, [msg, chatbot, model_dropdown], [chatbot, msg], queue=True)
369
- clear_btn.click(lambda: [], None, chatbot, queue=False)
370
 
371
  if __name__ == "__main__":
372
- app.queue(concurrency_count=5).launch()
 
1
import re
from html import escape

import gradio as gr
import requests
 
 
 
4
 
5
# API configurations: the deepseek-r1 family is served from a different
# backend than every other model (see get_api_url for the routing rule).
DEEPSEEK_API_URL = "https://typegpt-api.aham2.com/v1/chat/completions"  # TypeGPT API for deepseek-r1
DEFAULT_API_URL = "https://aham2api-3.onrender.com/v1/chat/completions"  # Default API for other models

# List of available models — the ids offered in the UI dropdown and sent
# verbatim as the "model" field of the chat-completions payload.
AVAILABLE_MODELS = [
    "deepseek-r1",
    "gpt-4o",
    "gpt-4o-latest",
    "chatgpt-4o-latest",
    "gemini-1.5-pro",
    "gemini-1.5-pro-latest",
    "gemini-flash-2.0",
    "gemini-1.5-flash",
    "claude-3-5-sonnet",
    "claude-3-5-sonnet-20240620",
    "anthropic/claude-3.5-sonnet",
    "mistral-large",
    "deepseek-v3",
    "llama-3.1-405b",
    "Meta-Llama-3.1-405B-Instruct-Turbo",
    "Meta-Llama-3.3-70B-Instruct-Turbo",
    "grok-2",
    "qwen-plus-latest",
    "qwen-turbo-latest",
    "dbrx-instruct",
    "claude",
    "qwen-2.5-32b",
    "qwen-2.5-coder-32b",
    "qwen-qwq-32b",
    "gemma2-9b-it",
    "deepseek-r1-distill-llama-70b",
    "o3-mini",
    "Claude-sonnet-3.7",
    "type/deepseek-r1",
    "samu/deepseek-r1"
]
42
+
43
def get_api_url(model_name):
    """Return the chat-completions endpoint for *model_name*.

    The deepseek-r1 family is hosted on the TypeGPT API; every other
    model id goes to the default endpoint.  Matching is case-insensitive.
    """
    deepseek_aliases = {"deepseek-r1", "type/deepseek-r1", "samu/deepseek-r1"}
    return DEEPSEEK_API_URL if model_name.lower() in deepseek_aliases else DEFAULT_API_URL
48
+
49
def convert_latex(text):
    """Convert LaTeX delimiters in *text* to HTML wrapper tags.

    Block equations ``$$...$$`` and ``\\[...\\]`` become
    ``<div class="latex-block">`` elements; inline equations ``$...$``
    and ``\\(...\\)`` become ``<span class="latex-inline">`` elements.

    The previous implementation used str.replace, which turned *every*
    delimiter (opening and closing alike) into an opening tag and never
    emitted a closing tag, producing unbalanced HTML.  Pair-wise regex
    substitution consumes both delimiters of each equation.
    """
    # Block equations first, so "$$" is not misread as two inline "$" markers.
    text = re.sub(r'\$\$(.*?)\$\$', r'</p><div class="latex-block">\1</div><p>', text, flags=re.DOTALL)
    text = re.sub(r'\\\[(.*?)\\\]', r'</p><div class="latex-block">\1</div><p>', text, flags=re.DOTALL)

    # Inline equations.
    text = re.sub(r'\$(.*?)\$', r'<span class="latex-inline">\1</span>', text)
    text = re.sub(r'\\\((.*?)\\\)', r'<span class="latex-inline">\1</span>', text)

    return text
62
 
63
def postprocess_message(message):
    """Render *message* as an HTML fragment with LaTeX support.

    Empty or None input yields an empty string.  Otherwise the text is
    HTML-escaped first (for security), LaTeX delimiters are converted to
    HTML wrappers, and newlines become ``<br>`` tags; the result is
    wrapped in a ``message-content`` div.
    """
    if not message:
        return ""

    escaped = escape(message)          # escape before any markup is injected
    with_latex = convert_latex(escaped)
    html_body = with_latex.replace('\n', '<br>')

    return f'<div class="message-content">{html_body}</div>'
78
 
79
def chat_with_aham(user_message, history, selected_model):
    """Send *user_message* plus prior *history* to the selected model's API.

    Args:
        user_message: Text typed by the user.
        history: List of (user, assistant) message pairs from earlier turns.
        selected_model: Model id; routed to an endpoint via get_api_url.

    Returns:
        (textbox_value, history): the textbox value is "" after any
        completed request (clearing the input); on blank input a prompt
        string is returned instead and history is left untouched.
        Errors are appended to history as assistant messages rather than
        raised, so the chat UI always stays responsive.
    """
    if not user_message.strip():
        return "Please enter a message.", history

    # Rebuild the full conversation — the chat-completions API is stateless.
    messages = []
    for user_msg, ai_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": ai_msg})
    messages.append({"role": "user", "content": user_message})

    payload = {
        "model": selected_model,
        "messages": messages,
        "temperature": 0.7
    }

    headers = {"Content-Type": "application/json"}
    api_url = get_api_url(selected_model)

    try:
        # timeout keeps a stalled backend from hanging the UI forever
        # (the original call had no timeout at all).
        response = requests.post(api_url, json=payload, headers=headers, timeout=60)

        if response.status_code == 200:
            data = response.json()
            ai_response = data.get("choices", [{}])[0].get("message", {}).get("content", "No response received.")

            # Append the current interaction to history
            history.append((user_message, ai_response))
            return "", history  # Clear input, return updated history
        else:
            error_msg = f"Error: {response.status_code}, {response.text}"
            history.append((user_message, error_msg))
            return "", history

    except Exception as e:  # boundary handler: surface any failure in the chat
        error_msg = f"Error: {str(e)}"
        history.append((user_message, error_msg))
        return "", history
122
+
123
# Custom CSS for LaTeX rendering and chat interface.  The .latex-block /
# .latex-inline classes are emitted by convert_latex/postprocess_message;
# the .dark overrides adapt them to Gradio's dark theme.
custom_css = """
.latex-block {
    padding: 10px;
    margin: 10px 0;
    background-color: #f5f5f5;
    border-radius: 5px;
    overflow-x: auto;
    text-align: center;
    font-family: monospace;
}

.latex-inline {
    background-color: #f0f0f0;
    padding: 2px 4px;
    border-radius: 3px;
    font-family: monospace;
}

.message-content {
    line-height: 1.6;
}

.dark .latex-block, .dark .latex-inline {
    background-color: #2d2d2d;
}

#chatbot {
    min-height: 500px;
}

.model-info {
    font-size: 0.9em;
    color: #666;
    margin-top: 5px;
}
"""
160
 
161
# Gradio UI with ChatInterface
with gr.Blocks(title="Aham AI Chat", css=custom_css) as demo:
    gr.Markdown("## Chat with Aham AI (with LaTeX support)")

    # Model picker; routing to the matching backend happens in get_api_url.
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=AVAILABLE_MODELS,
            value="deepseek-r1",
            label="Select Model",
            interactive=True
        )
        gr.Markdown("<div class='model-info'>deepseek-r1 uses TypeGPT API</div>")

    # Chat transcript.  latex_delimiters lets Gradio render math natively,
    # so model output containing $...$ / $$...$$ displays as equations.
    chatbot = gr.Chatbot(
        height=500,
        bubble_full_width=False,
        render_markdown=True,
        latex_delimiters=[
            {"left": "$$", "right": "$$", "display": True},
            {"left": "$", "right": "$", "display": False},
            {"left": "\\(", "right": "\\)", "display": False},
            {"left": "\\[", "right": "\\]", "display": True}
        ]
    )

    msg = gr.Textbox(
        label="Your Message",
        placeholder="Type your message here... (use $ for inline LaTeX and $$ for block equations)",
        lines=3
    )

    with gr.Row():
        clear = gr.Button("Clear Conversation", variant="secondary")
        submit = gr.Button("Send", variant="primary")

    examples = gr.Examples(
        examples=[
            ["What is the Pythagorean theorem?"],
            ["Explain the formula $E = mc^2$"],
            ["How do I solve this equation? $$\\int_0^1 x^2 dx$$"],
            ["What's the derivative of $f(x) = \\sin(x)$?"]
        ],
        inputs=msg,
        label="Example Questions (Click to try)"
    )

    # Thin adapter so the event signature matches chat_with_aham.
    def respond(message, chat_history, model):
        return chat_with_aham(message, chat_history, model)

    # Enter key and Send button both submit; outputs clear the textbox
    # and refresh the transcript.
    msg.submit(respond, [msg, chatbot, model_dropdown], [msg, chatbot])
    submit.click(respond, [msg, chatbot, model_dropdown], [msg, chatbot])
    # Resetting the Chatbot component to None empties the transcript.
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()