AILaborant committed on
Commit
8edc821
·
verified ·
1 Parent(s): 2e34ce1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +215 -0
app.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import time
import gradio as gr
from openai import OpenAI
import re

# --- Configuration ---
# Point this to your local LLM (e.g., Llama.cpp, vLLM, Ollama)
# If using Ollama, URL is usually http://localhost:11434/v1
# NOTE(review): the api_key is a placeholder — local OpenAI-compatible
# servers typically ignore it, but the OpenAI client requires a non-empty
# value. Confirm the base_url matches the server actually deployed.
client = OpenAI(base_url="http://localhost:8080/v1", api_key="no-key-required")
10
+
11
# Custom CSS injected into the Gradio app: a spinner animation for the live
# "thinking" status line, plus styling for the collapsible <details> panel
# that wraps the model's <think> (chain-of-thought) output.
CSS = """
.spinner { animation: spin 1s linear infinite; display: inline-block; margin-right: 8px; }
@keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
.thinking-summary { cursor: pointer; padding: 8px; background: #f5f5f5; border-radius: 4px; margin: 4px 0; }
.thinking-container { border-left: 3px solid #facc15; padding-left: 10px; margin: 8px 0; background: #210c29; }
details:not([open]) .thinking-container { border-left-color: #290c15; }
details { border: 1px solid #e0e0e0 !important; border-radius: 8px !important; padding: 12px !important; margin: 8px 0 !important; }
"""
19
+
20
def format_time(seconds_float):
    """Render a duration as a compact 'Xh Ym Zs' string.

    The input is rounded to the nearest whole second; leading zero units
    (hours, then minutes) are omitted entirely.
    """
    total = int(round(seconds_float))
    hours, rest = divmod(total, 3600)
    minutes, secs = divmod(rest, 60)
    if hours:
        return f"{hours}h {minutes}m {secs}s"
    if minutes:
        return f"{minutes}m {secs}s"
    return f"{secs}s"
29
+
30
+ # --- Web UI Logic (HTML/Streaming) ---
31
+
32
class ParserState:
    """Mutable accumulator for incrementally splitting a streamed LLM
    response into visible answer text and <think>...</think> content."""

    __slots__ = ['answer', 'thought', 'in_think', 'start_time', 'last_pos', 'total_think_time']

    def __init__(self):
        self.answer = ""             # text outside <think> tags
        self.thought = ""            # text inside <think> tags
        self.in_think = False        # currently inside an open <think> tag?
        self.start_time = 0          # perf_counter() when the open tag was seen
        self.last_pos = 0            # how much of the stream text was consumed
        self.total_think_time = 0.0  # accumulated thinking seconds
41
+
42
def _pending_tag_prefix(buffer, tag):
    """Length of the longest proper prefix of *tag* that *buffer* ends with.

    Used to hold back text that might be the start of a tag split across
    two stream chunks (e.g. a chunk ending in "<thi").
    """
    for k in range(min(len(tag) - 1, len(buffer)), 0, -1):
        if buffer.endswith(tag[:k]):
            return k
    return 0


def parse_response(text, state):
    """Incrementally split the streamed response into answer vs. thought.

    Args:
        text: The FULL response accumulated so far (not just the delta);
            only text[state.last_pos:] is newly processed.
        state: ParserState carrying parse position, buffers and timing.

    Returns:
        (state, elapsed) — elapsed is the seconds spent in the currently
        open think block, or 0 when not thinking.
    """
    buffer = text[state.last_pos:]
    state.last_pos = len(text)
    while buffer:
        if not state.in_think:
            think_start = buffer.find('<think>')
            if think_start != -1:
                state.answer += buffer[:think_start]
                state.in_think = True
                state.start_time = time.perf_counter()
                buffer = buffer[think_start + len('<think>'):]
            else:
                # Bug fix: a '<think>' tag may be split across chunks.
                # Hold back any trailing partial tag (rewind last_pos) so
                # the next call re-examines it instead of leaking it into
                # the visible answer.
                keep = _pending_tag_prefix(buffer, '<think>')
                if keep:
                    state.answer += buffer[:-keep]
                    state.last_pos -= keep
                else:
                    state.answer += buffer
                break
        else:
            think_end = buffer.find('</think>')
            if think_end != -1:
                state.thought += buffer[:think_end]
                duration = time.perf_counter() - state.start_time
                state.total_think_time += duration
                state.in_think = False
                buffer = buffer[think_end + len('</think>'):]
            else:
                # Same partial-tag guard for the closing tag.
                keep = _pending_tag_prefix(buffer, '</think>')
                if keep:
                    state.thought += buffer[:-keep]
                    state.last_pos -= keep
                else:
                    state.thought += buffer
                break
    elapsed = time.perf_counter() - state.start_time if state.in_think else 0
    return state, elapsed
69
+
70
def format_ui_response(state, elapsed):
    """Build the chatbot HTML: a collapsible thinking panel plus answer text.

    Returns (collapsible, answer_part): *collapsible* holds at most one
    <details> HTML fragment; *answer_part* is the answer with any stray
    think tags stripped out.
    """
    answer_part = state.answer.replace('<think>', '').replace('</think>', '')
    blocks = []

    if state.thought or state.in_think:
        if state.in_think:
            # Still thinking: keep the panel expanded and show a live timer.
            opener = "<details open>"
            status = f"🌀 Thinking for {format_time(state.total_think_time + elapsed)}"
        else:
            # Finished thinking: collapse the panel and show the total time.
            opener = "<details>"
            status = f"✅ Thought for {format_time(state.total_think_time)}"

        blocks.append(
            f"{opener}<summary>{status}</summary>\n\n"
            f"<div class='thinking-container'>\n{state.thought}\n</div>\n</details>"
        )
    return blocks, answer_part
87
+
88
def generate_web_response(history, temperature, top_p, max_tokens, active_gen):
    """Stream a chat completion from the local server into the Chatbot.

    Args:
        history: Chatbot history as [user, bot] pairs; the last pair holds
            the new user message (its bot side is still None).
        temperature, top_p, max_tokens: sampling parameters from the UI.
        active_gen: single-element list used as a mutable cancel flag;
            setting active_gen[0] = False aborts the stream.

    Yields:
        *history* with the last bot message progressively filled in.
    """
    # Bug fix: previously only the last user message was sent, so the chat
    # had no multi-turn memory. Build the full conversation context instead,
    # mirroring the message reconstruction in discord_api_endpoint.
    # NOTE(review): prior bot turns include the rendered <details> HTML from
    # format_ui_response; consider stripping that markup before resending.
    messages = []
    for user_msg, bot_msg in history[:-1]:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": history[-1][0]})

    full_response = ""
    state = ParserState()

    try:
        stream = client.chat.completions.create(
            model="local-model",  # Model name is ignored by most local servers
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=True
        )

        for chunk in stream:
            if not active_gen[0]:
                break  # cancelled by the caller
            if chunk.choices[0].delta.content:
                full_response += chunk.choices[0].delta.content
                state, elapsed = parse_response(full_response, state)
                collapsible, answer_part = format_ui_response(state, elapsed)
                history[-1][1] = "\n\n".join(collapsible + [answer_part])
                yield history

        # Final pass: re-render so the finished state (collapsed panel,
        # total think time) shows even if the last chunk had no content.
        state, elapsed = parse_response(full_response, state)
        collapsible, answer_part = format_ui_response(state, elapsed)
        history[-1][1] = "\n\n".join(collapsible + [answer_part])
        yield history

    except Exception as e:
        # Surface backend/connection failures directly in the chat window.
        history[-1][1] = f"Error: {str(e)}"
        yield history
    finally:
        active_gen[0] = False
125
+
126
def user(message, history):
    """Queue the user's turn: clear the textbox and append a new
    [message, None] pair for the bot response to fill in."""
    return "", [*history, [message, None]]
128
+
129
+ # --- API Logic (Discord Bot) ---
130
+
131
def discord_api_endpoint(prompt, history_json):
    """
    API Endpoint for Discord.

    Args:
        prompt: The user's message.
        history_json: List of [user, bot] lists from previous context.

    Returns:
        String containing the formatted response, with any
        <think>...</think> section rewritten as a Discord blockquote.
    """
    # 1. Reconstruct messages for OpenAI Client
    messages = []
    # Add system prompt if desired
    # messages.append({"role": "system", "content": "You are a helpful assistant."})

    # History comes in as [[user, bot], [user, bot]]
    for pair in history_json:
        if pair[0]:
            messages.append({"role": "user", "content": pair[0]})
        if pair[1]:
            messages.append({"role": "assistant", "content": pair[1]})

    messages.append({"role": "user", "content": prompt})

    try:
        # Non-streaming request for the bot to ensure we get full completion before sending
        response = client.chat.completions.create(
            model="local-model",
            messages=messages,
            temperature=0.7,
            max_tokens=4096
        )
        raw_content = response.choices[0].message.content

        # Parse <think> tags for Discord Markdown: render the thinking
        # process as an italicized blockquote above the answer.
        def replace_think(match):
            thought_content = match.group(1).strip()
            # Bug fix: Discord blockquotes are per-line, so every line of a
            # multi-line thought must carry the "> " prefix — previously
            # only the first line was quoted.
            quoted = thought_content.replace("\n", "\n> ")
            return f"> *Thinking Process:*\n> {quoted}\n\n"

        # Regex to find <think>...</think> (dotall to match newlines)
        formatted_content = re.sub(r'<think>(.*?)</think>', replace_think, raw_content, flags=re.DOTALL)

        return formatted_content

    except Exception as e:
        return f"❌ **Error from backend:** {str(e)}"
176
+
177
+ # --- Interface Setup ---
178
+
179
with gr.Blocks(css=CSS) as demo:
    gr.Markdown("## Qwen/Reasoning Model Host")

    # Single-element list inside gr.State: generate_web_response mutates it
    # in place, so it doubles as a cancel flag for an in-flight stream.
    active_gen = gr.State([False])
    chatbot = gr.Chatbot(elem_id="chatbot", height=500, show_label=False, render_markdown=True)

    with gr.Row():
        msg = gr.Textbox(label="Message", placeholder="Type message...", scale=4)
        submit_btn = gr.Button("Send", variant='primary', scale=1)

    with gr.Accordion("Parameters", open=False):
        temperature = gr.Slider(0.1, 1.5, 0.6, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, 0.95, label="Top-p")
        max_tokens = gr.Slider(2048, 32768, 4096, step=64, label="Max Tokens")

    # UI Events: append the user turn, raise the active flag, then stream
    # the model response into the chatbot. Button click and textbox submit
    # wire the same three-step chain.
    submit_event = submit_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        lambda: [True], outputs=active_gen).then(
        generate_web_response, [chatbot, temperature, top_p, max_tokens, active_gen], chatbot
    )
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        lambda: [True], outputs=active_gen).then(
        generate_web_response, [chatbot, temperature, top_p, max_tokens, active_gen], chatbot
    )

    # --- HIDDEN API COMPONENT ---
    # We create a hidden button/function specifically to expose the API
    # NOTE(review): the components created inline in `inputs=` are rendered
    # into the page (they are instantiated inside the Blocks context), and
    # gr.State may not accept a `label` kwarg on all Gradio versions —
    # confirm against the pinned Gradio release.
    api_trigger = gr.Button("API Trigger", visible=False)
    api_trigger.click(
        fn=discord_api_endpoint,
        inputs=[gr.Textbox(label="Prompt"), gr.State(label="History")],  # Virtual inputs
        outputs=[gr.Textbox(label="Response")],
        api_name="discord_chat"  # <--- THIS IS THE ENDPOINT NAME
    )
213
+
214
if __name__ == "__main__":
    # Bind on all interfaces so the Discord bot (and other LAN clients) can
    # reach the Gradio API endpoint on port 7860.
    demo.launch(server_name="0.0.0.0", server_port=7860)