mahfuj735 committed on
Commit
3a9cff7
·
verified ·
1 Parent(s): 845197f
Files changed (1) hide show
  1. app.py +629 -0
app.py CHANGED
@@ -0,0 +1,629 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ from dotenv import load_dotenv
4
+ import openai
5
+ from openai import AsyncOpenAI
6
+ import asyncio
7
+ import json
8
+ import time
9
+ from datetime import datetime
10
+ from typing import List, Dict, Any
11
+
12
# Load environment variables from a local .env file (no-op if the file is absent)
load_dotenv()

# Initialize client with API key from environment
initial_api_key = os.getenv("OPENAI_API_KEY")

# Global mutable state shared by the Gradio event handlers below.
current_api_key = initial_api_key  # key the current client was built with
current_model = "gpt-4o-mini"  # model last selected in the UI
client = None  # AsyncOpenAI client; stays None until a key is available
conversation_history = []  # rolling context: [{"role": ..., "content": ...}, ...]
system_prompt = "You are a helpful, creative, and intelligent AI assistant. You provide accurate, detailed, and engaging responses while being friendly and professional."

# Initialize client if API key is available
if initial_api_key:
    client = AsyncOpenAI(api_key=initial_api_key)
28
+
29
# Predefined system prompts: personality presets selectable from the UI dropdown.
# Keys double as the dropdown labels; the "Custom" option is appended at UI build time.
SYSTEM_PROMPTS = {
    "Default Assistant": "You are a helpful, creative, and intelligent AI assistant. You provide accurate, detailed, and engaging responses while being friendly and professional.",
    "Creative Writer": "You are a creative writing assistant. Help users with storytelling, creative writing, poetry, and imaginative content. Be expressive and inspiring.",
    "Code Expert": "You are a programming expert. Provide clear, well-commented code solutions, explain programming concepts, and help debug issues. Focus on best practices and clean code.",
    "Academic Tutor": "You are an academic tutor. Explain complex concepts clearly, provide step-by-step solutions, and help students understand difficult topics across various subjects.",
    "Business Analyst": "You are a business consultant. Provide strategic insights, analyze market trends, suggest business solutions, and help with professional decision-making.",
    "Health & Wellness": "You are a health and wellness advisor. Provide general health information, wellness tips, and lifestyle advice. Always remind users to consult healthcare professionals for medical issues.",
    "Travel Guide": "You are a travel expert. Provide destination recommendations, travel tips, cultural insights, and help plan memorable trips around the world.",
    "Tech Support": "You are a technical support specialist. Help troubleshoot technology issues, explain technical concepts simply, and provide step-by-step solutions.",
}
40
+
41
async def get_openai_response_stream(messages, model="gpt-4o-mini", temperature=0.7, max_tokens=2000):
    """Stream a chat completion from the module-level `client`.

    Yields the *accumulated* response text after every delta (not individual
    chunks), so each yielded value supersedes the previous one. On any
    failure — including a missing/unconfigured client — a single
    "Error: ..." string is yielded instead of raising.
    """
    accumulated = ""
    try:
        response_stream = await client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=True,
        )
        async for event in response_stream:
            delta = event.choices[0].delta.content
            if delta is not None:
                accumulated += delta
                yield accumulated
    except Exception as exc:
        yield f"Error: {str(exc)}"
60
+
61
def update_conversation_history(user_msg, assistant_msg, system_msg):
    """Append the latest exchange to the shared context buffer.

    Resets the buffer whenever the system prompt changes (or on first use),
    then trims it so at most 1 system message + 40 chat messages remain.
    """
    global conversation_history

    # Start fresh if there is no history yet or the system prompt was swapped.
    if not conversation_history or conversation_history[0]["content"] != system_msg:
        conversation_history = [{"role": "system", "content": system_msg}]

    conversation_history.extend([
        {"role": "user", "content": user_msg},
        {"role": "assistant", "content": assistant_msg},
    ])

    # Cap context length: 1 system prompt + the 40 most recent messages.
    if len(conversation_history) > 41:
        conversation_history = conversation_history[:1] + conversation_history[-40:]
76
+
77
async def chat_response_stream(message, history, api_key, model, temperature, max_tokens, system_prompt_choice, custom_system_prompt):
    """Handle one chat turn, yielding `(history, "")` updates as tokens stream in.

    Side effects: may rebuild the module-level `client` when the key changes,
    and updates `current_model` / `system_prompt` / `conversation_history`.

    FIX: the original used `return history, ""` on the early-exit paths, which
    is a SyntaxError inside an async generator (PEP 525 forbids `return` with
    a value when the function also contains `yield`) — the module could not
    even be imported. Each early exit now yields the final state and then
    returns bare.
    """
    global client, current_api_key, current_model, system_prompt

    # Rebuild the client whenever the key typed in the UI differs from the active one.
    if api_key and api_key.strip() != current_api_key:
        current_api_key = api_key.strip()
        if current_api_key:
            client = AsyncOpenAI(api_key=current_api_key)
        else:
            client = None
    elif not api_key and current_api_key:
        # Key was cleared in the UI: drop the client.
        current_api_key = None
        client = None

    # Without a configured client we can only prompt for a key.
    if client is None:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": "๐Ÿ”‘ Please provide your OpenAI API key to start the conversation."})
        yield history, ""
        return

    # Update model
    current_model = model

    # Resolve the effective system prompt (custom text wins when selected).
    if system_prompt_choice == "Custom" and custom_system_prompt.strip():
        system_prompt = custom_system_prompt.strip()
    else:
        system_prompt = SYSTEM_PROMPTS.get(system_prompt_choice, SYSTEM_PROMPTS["Default Assistant"])

    if not message.strip():
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": "โš ๏ธ Please enter a message to continue our conversation."})
        yield history, ""
        return

    # Add the user message plus a placeholder that the stream overwrites in place.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": "๐Ÿค” Thinking..."})

    # Prepare messages for the API: system prompt + everything except the placeholder.
    messages = [{"role": "system", "content": system_prompt}]
    for msg in history[:-1]:  # exclude the "Thinking..." message
        messages.append(msg)

    # Stream the response, re-yielding the growing history on each delta.
    full_response = ""
    try:
        async for partial_response in get_openai_response_stream(messages, current_model, temperature, max_tokens):
            full_response = partial_response
            history[-1]["content"] = full_response
            yield history, ""

        # Persist the completed exchange into the rolling context.
        update_conversation_history(message, full_response, system_prompt)

    except Exception as e:
        history[-1]["content"] = f"โŒ Error: {str(e)}"
        yield history, ""
135
+
136
def clear_chat():
    """Reset both the server-side context buffer and the visible chat.

    Returns a fresh empty list for the Chatbot component.
    """
    global conversation_history
    conversation_history = []
    return []
141
+
142
def export_conversation(history):
    """Dump the visible conversation plus model/prompt metadata to a JSON file.

    Writes `conversation_YYYYmmdd_HHMMSS.json` in the working directory and
    returns the filename, or returns None when there is nothing to export.
    """
    if not history:
        return None

    stamp = datetime.now()
    payload = {
        "timestamp": stamp.isoformat(),
        "model": current_model,
        "system_prompt": system_prompt,
        "conversation": history,
    }

    filename = f"conversation_{stamp.strftime('%Y%m%d_%H%M%S')}.json"
    with open(filename, 'w', encoding='utf-8') as handle:
        json.dump(payload, handle, indent=2, ensure_ascii=False)

    return filename
159
+
160
def get_status_info():
    """Build the one-line markdown string shown in the status bar."""
    clock = datetime.now().strftime('%H:%M:%S')
    return f"๐ŸŸข **Active** | Model: {current_model} | Messages: {len(conversation_history)} | Time: {clock}"
164
+
165
def get_model_info(model):
    """Return a short markdown blurb describing *model*, or a fallback note."""
    descriptions = {
        "gpt-4o-mini": "๐Ÿ’ซ **GPT-4o Mini** - Lightweight version of GPT-4o. Great balance of speed and capability.",
        "gpt-4o": "โœจ **GPT-4o** - Optimized for conversation with multimodal capabilities.",
        "gpt-4-turbo": "๐Ÿš€ **GPT-4 Turbo** - Latest GPT-4 with improved performance and larger context window.",
        "gpt-4": "๐Ÿง  **GPT-4** - Most capable model with superior reasoning. Higher cost but better quality.",
        "gpt-3.5-turbo": "โšก **GPT-3.5 Turbo** - Fast and efficient for most tasks. Cost-effective choice.",
    }
    return descriptions.get(model, "๐Ÿ“‹ Model information not available.")
175
+
176
# Enhanced CSS with ChatGPT-like styling, injected via gr.Blocks(css=...).
# Defines a CSS-variable palette, panel/chat-bubble styles keyed to the
# elem_classes used in the layout below, and two small animations.
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');

:root {
    --primary-color: #10a37f;
    --primary-hover: #0d8d6c;
    --secondary-color: #f7f7f8;
    --accent-color: #0066cc;
    --success-color: #10a37f;
    --warning-color: #ff9500;
    --error-color: #ff3333;
    --background-primary: #ffffff;
    --background-secondary: #f7f7f8;
    --background-chat: #ffffff;
    --text-primary: #2d3748;
    --text-secondary: #4a5568;
    --text-light: #718096;
    --border-color: #e2e8f0;
    --shadow-primary: 0 2px 8px rgba(0, 0, 0, 0.1);
    --shadow-secondary: 0 1px 3px rgba(0, 0, 0, 0.1);
}

* {
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}

.gradio-container {
    background: var(--background-primary);
    color: var(--text-primary);
    min-height: 100vh;
}

/* Header styling */
.header {
    background: linear-gradient(135deg, var(--primary-color) 0%, var(--accent-color) 100%);
    color: white;
    padding: 20px;
    text-align: center;
    border-radius: 0 0 20px 20px;
    margin-bottom: 20px;
    box-shadow: var(--shadow-primary);
}

.header h1 {
    font-size: 2.5em;
    font-weight: 700;
    margin: 0;
    text-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}

.header p {
    font-size: 1.1em;
    margin: 10px 0 0 0;
    opacity: 0.9;
}

/* Settings panel styling */
.settings-panel {
    background: var(--background-secondary);
    border: 1px solid var(--border-color);
    border-radius: 12px;
    padding: 20px;
    margin-bottom: 20px;
    box-shadow: var(--shadow-secondary);
}

.settings-panel h3 {
    color: var(--text-primary);
    margin-top: 0;
    margin-bottom: 15px;
    font-weight: 600;
}

/* Chat container styling */
.chat-container {
    background: var(--background-chat);
    border: 1px solid var(--border-color);
    border-radius: 12px;
    box-shadow: var(--shadow-secondary);
    overflow: hidden;
}

/* Status bar styling */
.status-bar {
    background: linear-gradient(135deg, var(--success-color), var(--accent-color));
    color: white;
    padding: 10px 15px;
    border-radius: 8px;
    margin: 10px 0;
    font-size: 0.9em;
    font-weight: 500;
}

/* Model info styling */
.model-info {
    background: var(--background-secondary);
    border: 1px solid var(--border-color);
    border-radius: 8px;
    padding: 15px;
    margin: 10px 0;
    font-size: 0.9em;
}

/* Button styling */
.gr-button {
    background: var(--primary-color);
    color: white;
    border: none;
    border-radius: 8px;
    padding: 10px 20px;
    font-weight: 500;
    transition: all 0.2s ease;
    cursor: pointer;
}

.gr-button:hover {
    background: var(--primary-hover);
    transform: translateY(-1px);
}

.gr-button[variant="secondary"] {
    background: var(--warning-color);
}

.gr-button[variant="secondary"]:hover {
    background: #e6850e;
}

/* Input styling */
.gr-textbox, .gr-dropdown, .gr-slider {
    border: 1px solid var(--border-color);
    border-radius: 8px;
    padding: 10px;
    font-size: 14px;
    transition: border-color 0.2s ease;
}

.gr-textbox:focus, .gr-dropdown:focus {
    border-color: var(--primary-color);
    outline: none;
    box-shadow: 0 0 0 3px rgba(16, 163, 127, 0.1);
}

/* Chatbot styling */
.gr-chatbot {
    background: transparent;
    border: none;
    font-size: 14px;
    line-height: 1.6;
}

/* Message styling */
.message {
    padding: 15px;
    margin: 8px 0;
    border-radius: 12px;
    max-width: 85%;
    word-wrap: break-word;
}

.message.user {
    background: var(--primary-color);
    color: white;
    margin-left: auto;
    margin-right: 0;
}

.message.assistant {
    background: var(--background-secondary);
    color: var(--text-primary);
    margin-right: auto;
    margin-left: 0;
}

/* Advanced controls */
.advanced-controls {
    background: var(--background-secondary);
    border: 1px solid var(--border-color);
    border-radius: 8px;
    padding: 15px;
    margin: 10px 0;
}

.advanced-controls h4 {
    margin-top: 0;
    margin-bottom: 10px;
    color: var(--text-primary);
}

/* Responsive design */
@media (max-width: 768px) {
    .header h1 {
        font-size: 2em;
    }

    .settings-panel {
        padding: 15px;
    }

    .gr-button {
        padding: 8px 16px;
        font-size: 13px;
    }
}

/* Loading animation */
@keyframes thinking {
    0%, 80%, 100% { opacity: 1; }
    40% { opacity: 0.3; }
}

.thinking {
    animation: thinking 1.5s infinite;
}

/* Fade in animation */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(10px); }
    to { opacity: 1; transform: translateY(0); }
}

.fade-in {
    animation: fadeIn 0.3s ease-out;
}
"""
402
+
403
# Create the ChatGPT-like interface.
# Layout: header row, then a two-column row (settings on the left at scale=1,
# chat on the right at scale=2). Component variables defined here are wired to
# the handlers in the event-connection section further down.
with gr.Blocks(css=custom_css, title="๐Ÿค– ChatGPT-like AI Assistant", theme=gr.themes.Soft()) as demo:
    # Header banner (styled by the .header CSS class)
    with gr.Row(elem_classes="header"):
        gr.HTML("""
        <div>
            <h1>๐Ÿค– ChatGPT-like AI Assistant</h1>
            <p>Powered by OpenAI's GPT models with advanced conversation features</p>
        </div>
        """)

    with gr.Row():
        # Left column - Settings and controls
        with gr.Column(scale=1):
            with gr.Group(elem_classes="settings-panel fade-in"):
                gr.Markdown("### โš™๏ธ **Configuration**")

                # API Key (prefilled from the environment when available)
                api_key_input = gr.Textbox(
                    label="๐Ÿ”‘ OpenAI API Key",
                    type="password",
                    value=initial_api_key,
                    placeholder="sk-...",
                    info="Your OpenAI API key for accessing GPT models"
                )

                # Model Selection
                model_dropdown = gr.Dropdown(
                    choices=[
                        "gpt-4o-mini",
                        "gpt-4o",
                        "gpt-4-turbo",
                        "gpt-4",
                        "gpt-3.5-turbo"
                    ],
                    value="gpt-4o-mini",
                    label="๐Ÿง  AI Model",
                    info="Choose the GPT model for your conversation"
                )

                # Model information display (updated by on_model_change)
                model_info_display = gr.Markdown(
                    get_model_info("gpt-4o-mini"),
                    elem_classes="model-info"
                )

                # System Prompt Selection ("Custom" enables the textbox below)
                system_prompt_dropdown = gr.Dropdown(
                    choices=list(SYSTEM_PROMPTS.keys()) + ["Custom"],
                    value="Default Assistant",
                    label="๐ŸŽญ Assistant Personality",
                    info="Choose how the AI should behave"
                )

                # Custom System Prompt (hidden until "Custom" is selected)
                custom_system_prompt = gr.Textbox(
                    label="โœ๏ธ Custom System Prompt",
                    placeholder="Enter your custom system prompt here...",
                    lines=3,
                    visible=False,
                    info="Define custom behavior for the AI"
                )

            # Advanced Controls: generation parameters passed through to the API
            with gr.Group(elem_classes="advanced-controls fade-in"):
                gr.Markdown("### ๐ŸŽ›๏ธ **Advanced Settings**")

                temperature_slider = gr.Slider(
                    minimum=0.1,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                    label="๐ŸŒก๏ธ Temperature",
                    info="Higher values = more creative, lower = more focused"
                )

                max_tokens_slider = gr.Slider(
                    minimum=100,
                    maximum=4000,
                    value=2000,
                    step=100,
                    label="๐Ÿ“ Max Tokens",
                    info="Maximum length of the AI response"
                )

            # Control Buttons
            with gr.Group(elem_classes="settings-panel fade-in"):
                gr.Markdown("### ๐ŸŽฎ **Controls**")

                with gr.Row():
                    clear_btn = gr.Button("๐Ÿ—‘๏ธ Clear Chat", variant="secondary")
                    # NOTE(review): export_btn is created but never wired to a
                    # handler in the event section — confirm export_conversation
                    # was meant to be connected to it.
                    export_btn = gr.Button("๐Ÿ“ฅ Export", variant="primary")

                # Status Display (refreshed on page load and on model change)
                status_display = gr.Markdown(
                    get_status_info(),
                    elem_classes="status-bar"
                )

            # Tips and Info
            with gr.Group(elem_classes="settings-panel fade-in"):
                gr.Markdown("""
                ### ๐Ÿ’ก **Tips**
                - **Temperature**: 0.3-0.7 for focused responses, 0.7-1.2 for creative writing
                - **Max Tokens**: Higher values allow longer responses but cost more
                - **System Prompts**: Try different personalities for varied conversation styles
                - **Memory**: The AI remembers your conversation context automatically
                """)

        # Right column - Chat interface
        with gr.Column(scale=2):
            with gr.Group(elem_classes="chat-container fade-in"):
                # Chat interface (type="messages": history is a list of
                # {"role": ..., "content": ...} dicts, matching the handlers)
                chatbot = gr.Chatbot(
                    label="๐Ÿ’ฌ **Conversation**",
                    height=600,
                    show_copy_button=True,
                    show_label=True,
                    container=True,
                    type="messages",
                    avatar_images=("https://cdn-icons-png.flaticon.com/512/847/847969.png",
                                   "https://cdn-icons-png.flaticon.com/512/4712/4712109.png")
                )

                # Message input row
                with gr.Row():
                    msg = gr.Textbox(
                        label="",
                        placeholder="Type your message here... (Shift+Enter for new line)",
                        lines=2,
                        max_lines=6,
                        scale=4,
                        container=False,
                        autofocus=True
                    )
                    send_btn = gr.Button("๐Ÿ“ค Send", variant="primary", scale=1)

            # Welcome message
            with gr.Group(elem_classes="settings-panel fade-in"):
                gr.Markdown("""
                ## ๐Ÿš€ **Welcome to Your AI Assistant!**

                This ChatGPT-like interface offers advanced features for natural conversation:

                ### โœจ **Key Features**
                - **๐Ÿง  Multiple AI Models**: Choose from GPT-3.5, GPT-4, and GPT-4o variants
                - **๐ŸŽญ Personality Modes**: Pre-configured system prompts for different use cases
                - **๐Ÿ’พ Conversation Memory**: Maintains context throughout your session
                - **๐ŸŽ›๏ธ Advanced Controls**: Fine-tune temperature and response length
                - **๐Ÿ“ฅ Export Conversations**: Save your chats as JSON files
                - **โšก Streaming Responses**: Real-time response generation

                ### ๐ŸŽฏ **Getting Started**
                1. **Add your OpenAI API key** (required for functionality)
                2. **Choose your preferred model** (GPT-4o-mini recommended for speed)
                3. **Select an assistant personality** or create your own
                4. **Start chatting** - the AI will remember your conversation!

                *Built with โค๏ธ for the best conversational AI experience*
                """)
563
+
564
+ # Event handlers
565
+ def submit_message(message, history, api_key, model, temperature, max_tokens, system_prompt_choice, custom_system_prompt):
566
+ """Handle message submission"""
567
+ if not message.strip():
568
+ return history, ""
569
+
570
+ # Use async generator for streaming
571
+ return chat_response_stream(message, history, api_key, model, temperature, max_tokens, system_prompt_choice, custom_system_prompt)
572
+
573
+ def on_system_prompt_change(choice):
574
+ """Handle system prompt dropdown change"""
575
+ if choice == "Custom":
576
+ return gr.update(visible=True)
577
+ else:
578
+ return gr.update(visible=False)
579
+
580
+ def on_model_change(model):
581
+ """Handle model selection change"""
582
+ return get_model_info(model), get_status_info()
583
+
584
+ def refresh_status():
585
+ """Refresh status display"""
586
+ return get_status_info()
587
+
588
    # Connect events.
    # Both the Send button and Enter in the textbox submit through the same
    # handler; outputs update the chatbot and clear the message box.
    # NOTE(review): export_btn has no click handler here, so
    # export_conversation() is never invoked from the UI — confirm intent.
    send_btn.click(
        submit_message,
        inputs=[msg, chatbot, api_key_input, model_dropdown, temperature_slider, max_tokens_slider, system_prompt_dropdown, custom_system_prompt],
        outputs=[chatbot, msg]
    )

    msg.submit(
        submit_message,
        inputs=[msg, chatbot, api_key_input, model_dropdown, temperature_slider, max_tokens_slider, system_prompt_dropdown, custom_system_prompt],
        outputs=[chatbot, msg]
    )

    # Wipe both the visible chat and the server-side context buffer.
    clear_btn.click(
        clear_chat,
        outputs=[chatbot]
    )

    # Toggle the custom-prompt textbox when the personality dropdown changes.
    system_prompt_dropdown.change(
        on_system_prompt_change,
        inputs=[system_prompt_dropdown],
        outputs=[custom_system_prompt]
    )

    # Update the model blurb and status bar when a different model is picked.
    model_dropdown.change(
        on_model_change,
        inputs=[model_dropdown],
        outputs=[model_info_display, status_display]
    )

    # Auto-refresh status once when the page loads
    demo.load(refresh_status, outputs=[status_display])
620
+
621
# Launch the application
if __name__ == "__main__":
    demo.launch(
        share=False,       # local only; no public Gradio share link
        show_error=True,   # surface handler exceptions in the browser UI
        show_api=False,    # hide the auto-generated API docs page
        quiet=False,
        # NOTE(review): ssl_verify=False disables certificate verification —
        # confirm this is intentional and not left over from debugging.
        ssl_verify=False
    )