huijio committed on
Commit
3bd322d
·
verified ·
1 Parent(s): 9af3cdd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -188
app.py CHANGED
@@ -1,215 +1,191 @@
1
  import gradio as gr
2
  import requests
3
  from html import escape
4
-
5
- # API configurations
6
- DEEPSEEK_API_URL = "https://typegpt-api.aham2.com/v1/chat/completions" # TypeGPT API for deepseek-r1
7
- DEFAULT_API_URL = "https://aham2api-3.onrender.com/v1/chat/completions" # Default API for other models
8
-
9
- # List of available models
10
- AVAILABLE_MODELS = [
11
- "deepseek-r1",
12
- "gpt-4o",
13
- "gpt-4o-latest",
14
- "chatgpt-4o-latest",
15
- "gemini-1.5-pro",
16
- "gemini-1.5-pro-latest",
17
- "gemini-flash-2.0",
18
- "gemini-1.5-flash",
19
- "claude-3-5-sonnet",
20
- "claude-3-5-sonnet-20240620",
21
- "anthropic/claude-3.5-sonnet",
22
- "mistral-large",
23
- "deepseek-v3",
24
- "llama-3.1-405b",
25
- "Meta-Llama-3.1-405B-Instruct-Turbo",
26
- "Meta-Llama-3.3-70B-Instruct-Turbo",
27
- "grok-2",
28
- "qwen-plus-latest",
29
- "qwen-turbo-latest",
30
- "dbrx-instruct",
31
- "claude",
32
- "qwen-2.5-32b",
33
- "qwen-2.5-coder-32b",
34
- "qwen-qwq-32b",
35
- "gemma2-9b-it",
36
- "deepseek-r1-distill-llama-70b",
37
- "o3-mini",
38
- "Claude-sonnet-3.7",
39
- "type/deepseek-r1",
40
- "samu/deepseek-r1"
41
- ]
42
-
43
- def get_api_url(model_name):
44
- """Determine which API endpoint to use based on model selection"""
45
- if model_name.lower() in ["deepseek-r1", "type/deepseek-r1", "samu/deepseek-r1"]:
46
- return DEEPSEEK_API_URL
47
- return DEFAULT_API_URL
48
-
49
- def convert_latex(text):
50
- """Convert LaTeX expressions to HTML for rendering"""
51
- # Handle block equations
52
- text = text.replace('$$', '</p><div class="latex-block">')
53
- text = text.replace('\\[', '</p><div class="latex-block">')
54
- text = text.replace('\\]', '</div><p>')
55
-
56
- # Handle inline equations
57
- text = text.replace('$', '<span class="latex-inline">')
58
- text = text.replace('\\(', '<span class="latex-inline">')
59
- text = text.replace('\\)', '</span>')
60
-
61
- return text
62
-
63
- def postprocess_message(message):
64
- """Process message for HTML display with LaTeX support"""
65
- if not message:
66
- return ""
67
-
68
- # Escape HTML first for security
69
- message = escape(message)
70
-
71
- # Convert LaTeX expressions
72
- message = convert_latex(message)
73
-
74
- # Handle newlines
75
- message = message.replace('\n', '<br>')
76
-
77
- return f'<div class="message-content">{message}</div>'
78
-
79
- def chat_with_aham(user_message, history, selected_model):
80
- if not user_message.strip():
81
- return "Please enter a message.", history
82
-
83
- # Prepare the conversation history for the API
84
- messages = []
85
-
86
- # Add previous conversation history
87
- for user_msg, ai_msg in history:
88
- messages.append({"role": "user", "content": user_msg})
89
- messages.append({"role": "assistant", "content": ai_msg})
90
-
91
- # Add the current user message
92
- messages.append({"role": "user", "content": user_message})
93
-
94
- payload = {
95
- "model": selected_model,
96
- "messages": messages,
97
- "temperature": 0.7
98
- }
99
-
100
- headers = {"Content-Type": "application/json"}
101
- api_url = get_api_url(selected_model)
102
-
103
- try:
104
- response = requests.post(api_url, json=payload, headers=headers)
105
-
106
- if response.status_code == 200:
107
- data = response.json()
108
- ai_response = data.get("choices", [{}])[0].get("message", {}).get("content", "No response received.")
109
-
110
- # Append the current interaction to history
111
- history.append((user_message, ai_response))
112
- return "", history # Clear input, return updated history
113
- else:
114
- error_msg = f"Error: {response.status_code}, {response.text}"
115
- history.append((user_message, error_msg))
116
- return "", history
117
-
118
- except Exception as e:
119
- error_msg = f"Error: {str(e)}"
120
- history.append((user_message, error_msg))
121
- return "", history
122
-
123
- # Custom CSS for LaTeX rendering and chat interface
124
- custom_css = """
125
  .latex-block {
126
- padding: 10px;
 
127
  margin: 10px 0;
128
- background-color: #f5f5f5;
129
- border-radius: 5px;
130
  overflow-x: auto;
131
- text-align: center;
132
- font-family: monospace;
133
  }
134
-
135
  .latex-inline {
136
- background-color: #f0f0f0;
137
- padding: 2px 4px;
138
- border-radius: 3px;
139
- font-family: monospace;
140
- }
141
-
142
- .message-content {
143
- line-height: 1.6;
144
  }
145
-
146
- .dark .latex-block, .dark .latex-inline {
147
- background-color: #2d2d2d;
148
- }
149
-
150
  #chatbot {
151
- min-height: 500px;
152
  }
153
-
154
- .model-info {
155
- font-size: 0.9em;
156
- color: #666;
157
- margin-top: 5px;
 
158
  }
159
  """
160
 
161
- # Gradio UI with ChatInterface
162
- with gr.Blocks(title="Aham AI Chat", css=custom_css) as demo:
163
- gr.Markdown("## Chat with Aham AI (with LaTeX support)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  with gr.Row():
166
  model_dropdown = gr.Dropdown(
167
- choices=AVAILABLE_MODELS,
168
- value="deepseek-r1",
169
- label="Select Model",
170
- interactive=True
 
 
 
171
  )
172
- gr.Markdown("<div class='model-info'>deepseek-r1 uses TypeGPT API</div>")
173
 
 
174
  chatbot = gr.Chatbot(
175
- height=500,
176
  bubble_full_width=False,
177
- render_markdown=True,
178
- latex_delimiters=[
179
- {"left": "$$", "right": "$$", "display": True},
180
- {"left": "$", "right": "$", "display": False},
181
- {"left": "\\(", "right": "\\)", "display": False},
182
- {"left": "\\[", "right": "\\]", "display": True}
183
- ]
184
- )
185
-
186
- msg = gr.Textbox(
187
- label="Your Message",
188
- placeholder="Type your message here... (use $ for inline LaTeX and $$ for block equations)",
189
- lines=3
190
  )
191
 
192
- with gr.Row():
193
- clear = gr.Button("Clear Conversation", variant="secondary")
194
- submit = gr.Button("Send", variant="primary")
195
-
196
- examples = gr.Examples(
197
- examples=[
198
- ["What is the Pythagorean theorem?"],
199
- ["Explain the formula $E = mc^2$"],
200
- ["How do I solve this equation? $$\\int_0^1 x^2 dx$$"],
201
- ["What's the derivative of $f(x) = \\sin(x)$?"]
202
- ],
203
- inputs=msg,
204
- label="Example Questions (Click to try)"
205
- )
206
 
207
- def respond(message, chat_history, model):
208
- return chat_with_aham(message, chat_history, model)
209
 
210
- msg.submit(respond, [msg, chatbot, model_dropdown], [msg, chatbot])
211
- submit.click(respond, [msg, chatbot, model_dropdown], [msg, chatbot])
212
- clear.click(lambda: None, None, chatbot, queue=False)
213
 
214
  if __name__ == "__main__":
215
- demo.launch()
 
1
import re
from html import escape, unescape
from typing import List, Tuple

import gradio as gr
import requests
6
+
7
# API endpoints
# OpenAI-compatible chat-completions endpoint used by chat_completion().
API_URL = "https://aham2api-3.onrender.com/v1/chat/completions"
# Model-listing endpoint queried once at startup by get_available_models().
MODELS_URL = "https://aham2api-3.onrender.com/v1/models"
10
+
11
# Enhanced CSS with wider layout and better LaTeX support.
# NOTE: the selectors below must stay in sync with the elem_id / elem_classes
# values set in the gr.Blocks UI ("#chatbot", "#component-2",
# ".input-container", ".model-dropdown") and with the wrapper classes
# emitted by format_message (".latex-block", ".latex-inline").
css = """
.gradio-container {
    max-width: 1000px !important;
    margin: 0 auto;
    padding: 0 !important;
}
.chat-container {
    display: flex;
    flex-direction: column;
    height: 85vh;
    gap: 0;
}
.chatbot {
    flex-grow: 1;
    overflow-y: auto;
    border: none;
    border-radius: 0;
    padding: 12px 20px !important;
    margin: 0;
    background: transparent;
    width: 100% !important;
}
.input-container {
    display: flex;
    gap: 8px;
    padding: 12px 0;
    background: white;
    position: sticky;
    bottom: 0;
    width: 100%;
}
.message {
    padding: 10px 16px;
    border-radius: 8px;
    margin: 6px 0;
    max-width: 90%;
    line-height: 1.5;
    font-size: 15.5px;
}
.user-message {
    background: #f0f7ff;
    margin-left: auto;
    border-bottom-right-radius: 0;
}
.bot-message {
    background: #f7f7f8;
    margin-right: auto;
    border-bottom-left-radius: 0;
}
.latex-block {
    background: #f8f9fa;
    padding: 14px;
    margin: 10px 0;
    border-radius: 6px;
    overflow-x: auto;
    font-size: 16px;
}
.latex-inline {
    background: #f0f0f0;
    padding: 2px 6px;
    border-radius: 4px;
    font-size: 16px;
}
#chatbot {
    padding: 15px 25px !important;
}
#component-2 {
    min-height: 42px !important;
    font-size: 15px;
}
.model-dropdown {
    margin-bottom: 10px !important;
}
"""
86
 
87
def get_available_models() -> List[str]:
    """Return the sorted model ids advertised by the API.

    Falls back to a small hard-coded list when the request, the HTTP
    status, or the response payload cannot be processed.
    """
    try:
        reply = requests.get(MODELS_URL, timeout=10)
        reply.raise_for_status()
        payload = reply.json()
        return sorted(entry['id'] for entry in payload.get('data', []))
    except Exception:
        # Best-effort fallback so the UI still renders without the API.
        return ["samura-gpt-4o", "samura-claude-3-5-sonnet"]
95
+
96
def chat_completion(messages: List[dict], model: str) -> dict:
    """POST the conversation to the chat API and return the parsed JSON.

    Any failure (network error, non-2xx status, bad JSON) is reported as
    a ``{"error": <description>}`` dict instead of raising.
    """
    payload = {
        "model": model,
        "messages": messages,
        "temperature": 0.7,
        "max_tokens": 2500,
    }
    try:
        reply = requests.post(
            API_URL,
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=40,
        )
        reply.raise_for_status()
        return reply.json()
    except Exception as exc:
        return {"error": str(exc)}
110
+
111
def format_message(text: str) -> str:
    """Convert raw chat text to display-safe HTML with LaTeX wrappers.

    HTML is escaped first for safety, then ``$$...$$`` block math and
    ``$...$`` inline math are wrapped in styled containers, and newlines
    become ``<br>`` tags.
    """
    text = escape(text)
    # Block math must be handled BEFORE inline math: the non-greedy
    # inline pattern r'\$(.*?)\$' would otherwise match the empty string
    # between the two leading '$' of '$$...$$' and mangle every block
    # equation.
    text = re.sub(r'\$\$(.*?)\$\$', r'<div class="latex-block">\[\1\]</div>', text, flags=re.DOTALL)
    text = re.sub(r'\$(.*?)\$', r'<span class="latex-inline">\(\1\)</span>', text)
    return text.replace("\n", "<br>")
117
+
118
def respond(message: str, chat_history: List[Tuple[str, str]], model: str):
    """Handle one chat turn: show the user message, query the API, update history.

    Returns ``(chat_history, "")``; the empty string clears the input box.
    History entries are stored as display HTML, so they are converted back
    to plain text before being re-sent to the API.
    """
    if not message.strip():
        return chat_history, ""

    # Show the user's message immediately with an empty bot slot.
    chat_history.append((format_message(message), ""))

    def _to_plain(html_text: str) -> str:
        # Reverse format_message. Restore newlines BEFORE stripping tags —
        # stripping first would delete the <br> markers and lose all line
        # breaks — then drop remaining tags and un-escape HTML entities.
        text = html_text.replace("<br>", "\n")
        text = re.sub(r'<[^<]+?>', '', text)
        return unescape(text)

    # Rebuild the plain-text conversation for the API from stored HTML,
    # excluding the just-appended (user, "") placeholder.
    messages = []
    for user_msg, bot_msg in chat_history[:-1]:
        if user_msg:
            messages.append({"role": "user", "content": _to_plain(user_msg)})
        if bot_msg:
            messages.append({"role": "assistant", "content": _to_plain(bot_msg)})
    messages.append({"role": "user", "content": message})

    # Get response (chat_completion reports failures as {"error": ...}).
    response = chat_completion(messages, model)
    if "error" in response:
        bot_message = f"Error: {response['error']}"
    else:
        bot_message = response.get("choices", [{}])[0].get("message", {}).get("content", "No response")

    # Fill the bot slot of the last turn with the formatted reply.
    chat_history[-1] = (chat_history[-1][0], format_message(bot_message))
    return chat_history, ""
144
+
145
# Gradio UI. Component order inside the Blocks context determines the
# on-screen layout, so statement order below is significant.
with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
    # Page title rendered as inline-styled HTML.
    gr.Markdown("""<h1 style='text-align: center; margin-bottom: 12px; font-size: 22px;'>Advanced Math Chat</h1>""")

    # Model dropdown at top
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=get_available_models(),  # fetched once at startup; hard-coded fallback on failure
            value="samura-gpt-4o",  # NOTE(review): assumes this id is in the fetched list — confirm
            label="Model",
            container=False,
            elem_classes=["model-dropdown"],  # styled via the module-level `css` string
            scale=1,
            min_width=250
        )

    # Chat area - wider with more space
    chatbot = gr.Chatbot(
        elem_id="chatbot",  # targeted by the `#chatbot` CSS rule
        bubble_full_width=False,
        show_label=False,
        avatar_images=(None, None),
        height="100%",
        layout="panel"
    )

    # Input area - wider
    with gr.Row(elem_classes=["input-container"]):
        msg = gr.Textbox(
            placeholder="Type your math question or equation...",
            show_label=False,
            container=False,
            scale=9,
            autofocus=True,
            elem_id="component-2",  # matched by the `#component-2` CSS rule — keep in sync
            lines=2
        )
        submit_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)

    # Clear button: resets both the textbox and the chat history.
    clear_btn = gr.ClearButton([msg, chatbot], size="sm")

    # Event handlers: both Enter (submit) and the Send button route through
    # respond(), which returns (updated history, "") to clear the textbox.
    msg.submit(respond, [msg, chatbot, model_dropdown], [chatbot, msg])
    submit_btn.click(respond, [msg, chatbot, model_dropdown], [chatbot, msg])
189
 
190
if __name__ == "__main__":
    # Launch the Gradio server only when run as a script (not on import).
    app.launch()