Shinhati2023 committed on
Commit
94f720e
·
verified ·
1 Parent(s): 87946cd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +708 -0
app.py ADDED
@@ -0,0 +1,708 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Groq AI Studio Pro - Glassmorphism Edition with Auto-Fallback
3
+ A complete, copy-paste ready tool for Groq API with intelligent model fallback
4
+ Features: Glass UI, Mobile-Responsive, Auto Model Switching, GitHub Integration
5
+ """
6
+
7
+ import os
8
+ import json
9
+ import base64
10
+ import requests
11
+ import gradio as gr
12
+ from datetime import datetime
13
+ from typing import Optional, List, Dict, Any, Tuple
14
+ from functools import lru_cache
15
+
16
# ==================== CONFIGURATION ====================
# Set these environment variables:
# GROQ_API_KEY - Your Groq API key
# GITHUB_TOKEN - Your GitHub Personal Access Token
# GITHUB_REPO - Your repo (format: username/repo-name)

GROQ_API_KEY = os.getenv("GROQ_API_KEY", "")
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN", "")
GITHUB_REPO = os.getenv("GITHUB_REPO", "")

# Fallback chain - if selected model fails, try these in order.
# Consumed by GroqClient.chat_completion when auto-fallback is enabled.
MODEL_FALLBACK_CHAIN = [
    "llama-3.3-70b-versatile",                        # Primary workhorse
    "meta-llama/llama-4-scout-17b-16e-instruct",      # Llama 4 Scout
    "meta-llama/llama-4-maverick-17b-128e-instruct",  # Llama 4 Maverick
    "llama-3.1-8b-instant",                           # Fast fallback
    "openai/gpt-oss-120b",                            # GPT-OSS large
    "openai/gpt-oss-20b",                             # GPT-OSS small
    "qwen/qwen3-32b",                                 # Qwen 3
    "mixtral-8x7b-32768",                             # Legacy fallback
]

# Default models shown in UI (will be updated dynamically by refresh_models
# once a valid API key is available).
DEFAULT_MODELS = [
    "llama-3.3-70b-versatile",
    "meta-llama/llama-4-scout-17b-16e-instruct",
    "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama-3.1-8b-instant",
    "openai/gpt-oss-120b",
    "openai/gpt-oss-20b",
    "deepseek-r1-distill-llama-70b",
    "qwen/qwen3-32b",
    "gemma2-9b-it",
    "mixtral-8x7b-32768",
    "whisper-large-v3-turbo",
    "groq/compound",
]
53
+
54
+ # ==================== GROQ API CLIENT ====================
55
class GroqClient:
    """Thin wrapper around the Groq OpenAI-compatible REST API.

    Supports listing available models and sending chat completions with an
    optional automatic fallback through MODEL_FALLBACK_CHAIN when the
    requested model is unavailable.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.groq.com/openai/v1"
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        # Populated by fetch_models(); empty until the first successful call.
        self.available_models: List[str] = []

    def fetch_models(self) -> List[str]:
        """Fetch available model ids from the Groq API; [] on any failure."""
        url = f"{self.base_url}/models"
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                data = response.json()
                models = [m["id"] for m in data.get("data", [])]
                self.available_models = models
                return models
        except Exception as e:
            # Best-effort: callers fall back to DEFAULT_MODELS on [].
            print(f"Error fetching models: {e}")
        return []

    def chat_completion(
        self,
        messages: List[Dict[str, str]],
        model: str,
        temperature: float = 0.7,
        max_tokens: int = 4096,
        stream: bool = False,
        retry_with_fallback: bool = True
    ) -> Tuple[Dict[str, Any], str]:
        """
        Send a chat completion request with automatic model fallback.

        Returns: (response_dict, model_used). On failure the dict contains
        an "error" key (and possibly "detail" with the raw response body).
        """
        url = f"{self.base_url}/chat/completions"

        # Determine which models to try, in order, without duplicates.
        models_to_try = [model]
        if retry_with_fallback and model in MODEL_FALLBACK_CHAIN:
            # Add the remaining fallback chain after the selected model.
            idx = MODEL_FALLBACK_CHAIN.index(model)
            models_to_try.extend(MODEL_FALLBACK_CHAIN[idx + 1:])
        elif retry_with_fallback:
            models_to_try.extend(m for m in MODEL_FALLBACK_CHAIN if m != model)

        last_error = None

        for try_model in models_to_try:
            payload = {
                "model": try_model,
                "messages": messages,
                "temperature": temperature,
                "max_completion_tokens": max_tokens,
                "stream": stream
            }

            try:
                response = requests.post(url, headers=self.headers, json=payload, timeout=60)

                if response.status_code == 200:
                    return response.json(), try_model

                # BUG FIX: the error body is not guaranteed to be JSON (e.g.
                # HTML from a gateway), so parse it defensively instead of
                # letting response.json() raise an uncaught ValueError.
                try:
                    error_data = response.json() if response.text else {}
                except ValueError:
                    error_data = {}
                error_msg = error_data.get("error", {}).get("message", "")

                # If the model is unknown/retired, advance to the next one.
                if "model_not_found" in error_msg or "does not exist" in error_msg:
                    print(f"Model {try_model} not available, trying fallback...")
                    last_error = f"{try_model}: {error_msg}"
                    continue

                # Any other error (rate limit, bad request...) is surfaced
                # immediately rather than burning through the whole chain.
                return {"error": error_msg or f"HTTP {response.status_code}", "detail": response.text}, try_model

            except requests.exceptions.RequestException as e:
                last_error = str(e)
                continue

        # Every candidate model failed.
        return {"error": f"All models failed. Last error: {last_error}"}, model
139
+
140
+ # ==================== GITHUB INTEGRATION ====================
141
class GitHubIntegration:
    """Minimal GitHub REST v3 client for committing files and creating gists."""

    def __init__(self, token: str, repo: str):
        self.token = token
        # "owner/name"; may be empty when the instance is used for gists only.
        self.repo = repo
        self.base_url = "https://api.github.com"
        self.headers = {
            "Authorization": f"token {token}",
            "Accept": "application/vnd.github.v3+json"
        }

    def commit_file(
        self,
        content: str,
        filename: str,
        path: str = "groq-studio-sessions",
        message: Optional[str] = None
    ) -> Dict[str, str]:
        """Commit `content` as a timestamped file under `path` on main.

        Returns a dict with "status" == "success" (plus url/sha/path) or
        "error" (plus a "message" describing the failure).
        """
        if not self.token or not self.repo:
            return {"status": "error", "message": "GitHub credentials not configured"}

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # BUG FIX: the `filename` argument was previously ignored and a
        # literal placeholder was committed instead.
        full_filename = f"{timestamp}_{filename}"
        file_path = f"{path}/{full_filename}"

        content_bytes = content.encode('utf-8')
        content_b64 = base64.b64encode(content_bytes).decode('utf-8')

        check_url = f"{self.base_url}/repos/{self.repo}/contents/{file_path}"

        try:
            # The contents API requires the existing blob SHA to update a file.
            check_resp = requests.get(check_url, headers=self.headers, timeout=30)
            sha = check_resp.json().get("sha") if check_resp.status_code == 200 else None

            data = {
                "message": message or f"Update from Groq Studio - {timestamp}",
                "content": content_b64,
                "branch": "main"
            }
            if sha:
                data["sha"] = sha

            resp = requests.put(check_url, headers=self.headers, json=data, timeout=30)
            resp.raise_for_status()

            result = resp.json()
            return {
                "status": "success",
                "url": result["content"]["html_url"],
                "sha": result["content"]["sha"],
                "path": file_path
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}

    def create_gist(self, content: str, description: str = "Groq Studio Export", public: bool = False) -> Dict[str, str]:
        """Create a GitHub Gist (secret by default) holding the session text."""
        if not self.token:
            return {"status": "error", "message": "GitHub token not configured"}

        url = f"{self.base_url}/gists"
        data = {
            "description": description,
            "public": public,
            "files": {
                "groq_session.md": {
                    "content": content
                }
            }
        }

        try:
            resp = requests.post(url, headers=self.headers, json=data, timeout=30)
            resp.raise_for_status()
            result = resp.json()
            return {
                "status": "success",
                "url": result["html_url"],
                "id": result["id"]
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}
223
+
224
+ # ==================== UI FUNCTIONS ====================
225
def refresh_models(api_key: str) -> gr.update:
    """Query Groq for the live model list and update the model dropdown.

    Falls back to DEFAULT_MODELS when no key is configured or the API
    returns nothing.
    """
    key = api_key or GROQ_API_KEY
    if not key:
        return gr.update(choices=DEFAULT_MODELS, value=DEFAULT_MODELS[0])

    models = GroqClient(key).fetch_models()
    if not models:
        return gr.update(choices=DEFAULT_MODELS, value=DEFAULT_MODELS[0])

    # Surface the well-known model families first, keeping first-seen order
    # and never listing a model twice.
    priority = ["llama-3.3-70b-versatile", "meta-llama/llama-4", "gpt-oss", "qwen", "llama-3.1"]
    ordered = []
    for tag in priority:
        for candidate in models:
            if tag in candidate and candidate not in ordered:
                ordered.append(candidate)
    ordered.extend(m for m in models if m not in ordered)
    return gr.update(choices=ordered, value=ordered[0])
244
+
245
def process_chat(
    message: str,
    history: List[List[str]],
    model: str,
    temperature: float,
    max_tokens: int,
    system_prompt: str,
    api_key: str,
    auto_fallback: bool
) -> tuple:
    """Handle one chat turn; a generator yielding (history, input_box, status).

    Yields an intermediate "Thinking..." state, then the final result.
    Consumed by Gradio, so every UI update - including errors - must be
    yielded, never returned.
    """
    if not api_key and not GROQ_API_KEY:
        # BUG FIX: this function is a generator (it yields below), so a bare
        # `return value` here would be swallowed as StopIteration.value and
        # the error message would never reach the UI. Yield it instead.
        yield history + [[message, "❌ Error: Please provide a Groq API Key in settings or environment variable."]], "", "⚠️ No API Key"
        return

    key = api_key or GROQ_API_KEY
    client = GroqClient(key)

    # Build the OpenAI-style message list from the chat history.
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})

    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        if assistant:
            messages.append({"role": "assistant", "content": assistant})

    messages.append({"role": "user", "content": message})

    # Show a typing indicator while the request is in flight.
    history = history + [[message, "⏳ Thinking..."]]
    yield history, "", f"πŸ”„ Using {model}..."

    # Get the response, possibly via a fallback model.
    response, model_used = client.chat_completion(
        messages=messages,
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        retry_with_fallback=auto_fallback
    )

    # Replace the typing indicator with the outcome.
    if "error" in response:
        error_msg = f"❌ Error: {response['error']}"
        if "detail" in response:
            error_msg += f"\n\nDetails: {response['detail']}"
        history[-1][1] = error_msg
        status = f"❌ Error with {model_used}"
    else:
        try:
            content = response["choices"][0]["message"]["content"]
            usage = response.get("usage", {})

            # Footer noting which model actually answered (may differ from
            # the one requested when auto-fallback kicked in).
            fallback_notice = ""
            if model_used != model:
                fallback_notice = f" (⚠️ Fallback from {model})"

            info = f"\n\n---\n*Model: {model_used}{fallback_notice} | Tokens: {usage.get('total_tokens', 'N/A')}*"
            history[-1][1] = content + info
            status = f"βœ… Success with {model_used}" + (" (fallback)" if model_used != model else "")
        except Exception as e:
            history[-1][1] = f"❌ Error parsing response: {str(e)}"
            status = "❌ Parse Error"

    yield history, "", status
312
+
313
def export_to_github(
    history: List[List[str]],
    filename: str,
    commit_msg: str,
    github_token: str,
    github_repo: str
) -> str:
    """Commit the formatted chat history to the configured GitHub repo.

    Returns a markdown status line for the export panel.
    """
    token = github_token or GITHUB_TOKEN
    repo = github_repo or GITHUB_REPO

    if not (token and repo):
        return "❌ Error: GitHub token and repo not configured."

    result = GitHubIntegration(token, repo).commit_file(
        format_conversation(history),
        filename or "session.md",
        message=commit_msg,
    )

    if result["status"] != "success":
        return f"❌ Error: {result['message']}"
    return f"βœ… Committed successfully!\n\nπŸ”— [View on GitHub]({result['url']})"
335
+
336
def create_gist_export(
    history: List[List[str]],
    description: str,
    github_token: str
) -> str:
    """Export the chat history as a GitHub Gist; returns a markdown status line."""
    token = github_token or GITHUB_TOKEN
    if not token:
        return "❌ Error: GitHub token not configured."

    markdown = format_conversation(history)
    result = GitHubIntegration(token, "").create_gist(markdown, description)

    if result["status"] != "success":
        return f"❌ Error: {result['message']}"
    return f"βœ… Gist created!\n\nπŸ”— [View Gist]({result['url']})"
355
+
356
def format_conversation(history: List[List[str]]) -> str:
    """Render a chat history as a markdown document.

    Turns with no assistant reply (or a still-pending typing indicator)
    are skipped, but keep their turn number.
    """
    parts = [
        "# Groq Studio Session\n\n",
        f"**Date:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n",
        "## Conversation\n\n",
    ]

    for turn, (user_msg, bot_msg) in enumerate(history, start=1):
        if not bot_msg or bot_msg == "⏳ Thinking...":
            continue
        parts.append(f"### Turn {turn}\n\n")
        parts.append(f"**User:**\n{user_msg}\n\n")
        parts.append(f"**Assistant:**\n{bot_msg}\n\n---\n\n")

    return "".join(parts)
370
+
371
def clear_chat():
    """Reset the UI: empty chat history, empty state, and a ready status."""
    empty_history = []
    return empty_history, "", "πŸ”„ Ready"
374
+
375
def debug_request(
    message: str,
    history: List[List[str]],
    model: str,
    temperature: float,
    max_tokens: int,
    system_prompt: str,
    api_key: str
) -> str:
    """Render the pending request as a ready-to-run cURL command and Python snippet.

    The API key is masked (short prefix/suffix only) so the output is safe
    to copy into a bug report.
    """
    key = api_key or GROQ_API_KEY

    # Reconstruct the exact message list process_chat would send.
    messages: List[Dict[str, str]] = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    for user_turn, bot_turn in history:
        messages.append({"role": "user", "content": user_turn})
        if bot_turn:
            messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": message})

    payload = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_completion_tokens": max_tokens
    }
    payload_json = json.dumps(payload, indent=2)

    masked_key = f"{key[:10]}...{key[-4:] if len(key) > 14 else ''}"

    curl_cmd = (
        "curl -X POST https://api.groq.com/openai/v1/chat/completions \\\n"
        f'  -H "Authorization: Bearer {masked_key}" \\\n'
        '  -H "Content-Type: application/json" \\\n'
        f"  -d '{payload_json}'"
    )

    python_code = (
        "import requests\n"
        "import json\n"
        "import os\n"
        "\n"
        'url = "https://api.groq.com/openai/v1/chat/completions"\n'
        "headers = {\n"
        "    \"Authorization\": f\"Bearer {os.getenv('GROQ_API_KEY')}\",\n"
        '    "Content-Type": "application/json"\n'
        "}\n"
        "\n"
        f"payload = {payload_json}\n"
        "\n"
        "response = requests.post(url, headers=headers, json=payload)\n"
        'print(f"Status: {response.status_code}")\n'
        'print(f"Response: {response.text}")'
    )

    return f"## cURL Command\n```bash\n{curl_cmd}\n```\n\n## Python Code\n```python\n{python_code}\n```"
425
+
426
+ # ==================== CUSTOM CSS ====================
427
# Glassmorphism stylesheet injected into gr.Blocks (see create_interface).
# NOTE: this is a runtime string passed as `css=`; the CSS content itself
# is left untouched here on purpose.
CUSTOM_CSS = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');

:root {
    --glass-bg: rgba(20, 20, 30, 0.6);
    --glass-border: rgba(255, 255, 255, 0.1);
    --accent-primary: #ff6b6b;
    --accent-secondary: #4ecdc4;
    --text-primary: #ffffff;
    --gradient-1: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}

body {
    font-family: 'Inter', sans-serif !important;
    background: linear-gradient(-45deg, #0f0c29, #302b63, #24243e, #1a1a2e) !important;
    background-size: 400% 400% !important;
    animation: gradientBG 15s ease infinite !important;
    min-height: 100vh;
}

@keyframes gradientBG {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}

.glass-panel {
    background: var(--glass-bg) !important;
    backdrop-filter: blur(20px) !important;
    -webkit-backdrop-filter: blur(20px) !important;
    border: 1px solid var(--glass-border) !important;
    border-radius: 24px !important;
    box-shadow: 0 8px 32px 0 rgba(0, 0, 0, 0.37) !important;
}

.chatbot {
    background: rgba(0, 0, 0, 0.3) !important;
    border-radius: 20px !important;
    border: 1px solid var(--glass-border) !important;
    height: 600px !important;
}

.chatbot .message {
    background: var(--glass-bg) !important;
    border: 1px solid var(--glass-border) !important;
    border-radius: 18px !important;
    margin: 8px 0 !important;
    padding: 16px !important;
    backdrop-filter: blur(10px) !important;
}

.chatbot .message.user {
    background: linear-gradient(135deg, rgba(102, 126, 234, 0.3), rgba(118, 75, 162, 0.3)) !important;
    border-left: 4px solid #667eea !important;
}

.chatbot .message.bot {
    background: linear-gradient(135deg, rgba(78, 205, 196, 0.2), rgba(68, 160, 141, 0.2)) !important;
    border-left: 4px solid #4ecdc4 !important;
}

input, textarea, select {
    background: rgba(0, 0, 0, 0.4) !important;
    border: 1px solid var(--glass-border) !important;
    border-radius: 12px !important;
    color: var(--text-primary) !important;
    padding: 12px 16px !important;
}

button {
    background: var(--gradient-1) !important;
    border: none !important;
    border-radius: 12px !important;
    color: white !important;
    font-weight: 600 !important;
    padding: 12px 24px !important;
    transition: all 0.3s ease !important;
}

button:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 10px 30px rgba(102, 126, 234, 0.4) !important;
}

.status-bar {
    background: rgba(0, 0, 0, 0.5) !important;
    border-radius: 8px !important;
    padding: 8px 16px !important;
    font-size: 12px !important;
    color: #4ecdc4 !important;
    border: 1px solid var(--glass-border) !important;
}

@media (max-width: 768px) {
    .glass-panel { border-radius: 16px !important; margin: 8px !important; }
    .chatbot { height: 400px !important; }
    button { padding: 10px 16px !important; font-size: 14px !important; }
}
"""
526
+
527
+ # ==================== GRADIO INTERFACE ====================
528
def create_interface():
    """Assemble the Gradio Blocks UI and wire all event handlers.

    Returns the (unlaunched) gr.Blocks app. All heavy lifting is delegated
    to the module-level handlers (process_chat, export_to_github,
    create_gist_export, debug_request, refresh_models, clear_chat).
    """
    with gr.Blocks(css=CUSTOM_CSS, title="Groq AI Studio Pro") as demo:

        # Header banner
        with gr.Row():
            with gr.Column():
                gr.HTML("""
                <div class="glass-panel" style="text-align: center; padding: 20px; margin-bottom: 20px;">
                    <h1 style="background: linear-gradient(135deg, #fff 0%, #a8edea 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; font-weight: 700; margin: 0;">
                        ⚑ Groq AI Studio Pro
                    </h1>
                    <p style="color: #b0b0b0; margin-top: 8px;">
                        <span style="color: #4ecdc4;">●</span> Auto-Fallback Enabled | Glass UI | GitHub Integration
                    </p>
                </div>
                """)

        # Kept for backward compatibility with the clear handler's outputs;
        # the live conversation actually lives in the `chatbot` component.
        state_history = gr.State([])

        with gr.Row():
            # Left panel - chat
            with gr.Column(scale=3):
                with gr.Group(elem_classes="glass-panel"):
                    chatbot = gr.Chatbot(
                        label="Conversation",
                        elem_classes="chatbot",
                        bubble_full_width=False,
                        show_copy_button=True
                    )

                    with gr.Row():
                        msg_input = gr.Textbox(
                            placeholder="Type your message here... (Shift+Enter for new line)",
                            label="",
                            scale=4,
                            show_label=False,
                            lines=1
                        )
                        send_btn = gr.Button("Send ➀", scale=1, variant="primary")

                    with gr.Row():
                        clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
                        debug_btn = gr.Button("πŸ”§ Debug", variant="secondary")
                        export_btn = gr.Button("πŸ“€ Export", variant="secondary")

                    # Status bar showing the last request's outcome
                    status_text = gr.Textbox(
                        value="πŸ”„ Ready",
                        label="",
                        interactive=False,
                        elem_classes="status-bar"
                    )

            # Right panel - settings
            with gr.Column(scale=1):
                with gr.Group(elem_classes="glass-panel"):
                    gr.Markdown("### βš™οΈ Configuration")

                    with gr.Accordion("πŸ”‘ API Settings", open=False):
                        api_key_input = gr.Textbox(
                            label="Groq API Key",
                            placeholder="gsk_...",
                            type="password",
                            value=GROQ_API_KEY
                        )

                        github_token_input = gr.Textbox(
                            label="GitHub Token",
                            placeholder="ghp_...",
                            type="password",
                            value=GITHUB_TOKEN
                        )

                        github_repo_input = gr.Textbox(
                            label="GitHub Repo",
                            placeholder="username/repo",
                            value=GITHUB_REPO
                        )

                    with gr.Row():
                        model_dropdown = gr.Dropdown(
                            choices=DEFAULT_MODELS,
                            value=DEFAULT_MODELS[0],
                            label="Model"
                        )
                        refresh_btn = gr.Button("πŸ”„", size="sm")

                    auto_fallback = gr.Checkbox(
                        label="Auto-Fallback if Model Fails",
                        value=True,
                        info="Automatically try backup models"
                    )

                    system_prompt = gr.Textbox(
                        label="System Prompt",
                        placeholder="You are a helpful assistant...",
                        lines=3,
                        value="You are a helpful, creative AI assistant powered by Groq. Respond concisely but thoroughly."
                    )

                    with gr.Row():
                        temperature = gr.Slider(0, 2, value=0.7, label="Temperature")
                        max_tokens = gr.Slider(100, 8192, value=4096, label="Max Tokens")

        # Debug modal (hidden until the Debug button is pressed)
        with gr.Row(visible=False) as debug_row:
            with gr.Column():
                debug_output = gr.Code(label="Debug Request", language="python")
                close_debug = gr.Button("Close")

        # Export modal (hidden until the Export button is pressed)
        with gr.Row(visible=False) as export_row:
            with gr.Column():
                gr.Markdown("### Export to GitHub")
                export_filename = gr.Textbox(label="Filename", value="session.md")
                export_message = gr.Textbox(label="Commit Message", value="Update from Groq Studio")
                with gr.Row():
                    commit_btn = gr.Button("πŸ“₯ Commit to Repo", variant="primary")
                    gist_btn = gr.Button("πŸ“„ Create Gist")
                export_result = gr.Markdown()
                close_export = gr.Button("Close")

        # ---- Event wiring ----
        refresh_btn.click(
            refresh_models,
            [api_key_input],
            [model_dropdown]
        )

        msg_input.submit(
            process_chat,
            [msg_input, chatbot, model_dropdown, temperature, max_tokens, system_prompt, api_key_input, auto_fallback],
            [chatbot, msg_input, status_text]
        )

        send_btn.click(
            process_chat,
            [msg_input, chatbot, model_dropdown, temperature, max_tokens, system_prompt, api_key_input, auto_fallback],
            [chatbot, msg_input, status_text]
        )

        clear_btn.click(clear_chat, outputs=[chatbot, state_history, status_text])

        debug_btn.click(
            debug_request,
            [msg_input, chatbot, model_dropdown, temperature, max_tokens, system_prompt, api_key_input],
            [debug_output]
        ).then(lambda: gr.update(visible=True), outputs=[debug_row])

        close_debug.click(lambda: gr.update(visible=False), outputs=[debug_row])

        export_btn.click(lambda: gr.update(visible=True), outputs=[export_row])
        close_export.click(lambda: gr.update(visible=False), outputs=[export_row])

        # BUG FIX: exports previously read from `state_history`, which no
        # handler ever writes, so every export sent an empty conversation.
        # Read the live history from the `chatbot` component instead.
        commit_btn.click(
            export_to_github,
            [chatbot, export_filename, export_message, github_token_input, github_repo_input],
            [export_result]
        )

        gist_btn.click(
            create_gist_export,
            [chatbot, export_message, github_token_input],
            [export_result]
        )

        # Footer
        gr.HTML("""
        <div style="text-align: center; padding: 20px; color: #666; font-size: 12px;">
            <p>Powered by Groq API | Auto-Fallback: Llama 3.3 β†’ Llama 4 β†’ GPT-OSS β†’ Qwen |
            <a href="https://console.groq.com" target="_blank" style="color: #4ecdc4;">Get API Key</a></p>
        </div>
        """)

    return demo
704
+
705
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo = create_interface()
    # Bind to all interfaces on port 7860 (standard for HF Spaces / Docker).
    demo.launch(server_name="0.0.0.0", server_port=7860)