resumesearch commited on
Commit
632c528
Β·
verified Β·
1 Parent(s): fe23536

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -1042
app.py CHANGED
@@ -1,1057 +1,73 @@
1
- # app.py
2
-
3
  import os
4
- import functools
5
  import tiktoken
6
  import gradio as gr
7
- from openai import OpenAI
8
- from typing import List, Tuple, Dict, Optional
9
-
10
- """JadeGPT – Synthwave Edition
11
- -------------------------------------------------
12
- β€’ OpenAI Python SDK β‰₯ 1.0.0
13
- β€’ Gradio β‰₯ 5.34.1
14
- β€’ tiktoken
15
-
16
- Cyberpunk meets Miami Vice. Welcome to the future, 1984.
17
- """
18
-
19
- # ────────────────────────────────
20
- # 1 Β· Initialisation & constants
21
- # ────────────────────────────────
22
 
23
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", "").strip())
24
-
25
- MODEL_DETAILS = {
26
- "gpt-4o": {"input_price": 5.00, "output_price": 15.00, "max_context": 128_000, "description": "OpenAI's most capable and cost-effective multimodal model."},
27
- "gpt-4o-mini": {"input_price": 0.15, "output_price": 0.60, "max_context": 128_000, "description": "Highly affordable and fast, suitable for simpler tasks."},
28
- "o3": {"input_price": 2.00, "output_price": 8.00, "max_context": 200_000, "description": "Designed for complex reasoning and step-by-step problem-solving."},
29
- "o3-pro": {"input_price": 20.00, "output_price": 80.00, "max_context": 200_000, "description": "Premium reasoning model for high-stakes tasks, higher accuracy."},
30
- "o4-mini": {"input_price": 1.10, "output_price": 4.40, "max_context": 200_000, "description": "Balanced multimodal model, good for image+text on a budget."},
31
- "gpt-4-32k": {"input_price": 0.06, "output_price": 0.12, "max_context": 32_768, "description": "Larger context GPT-4 model (older generation)."},
32
- "gpt-4": {"input_price": 0.03, "output_price": 0.06, "max_context": 8_192, "description": "Original GPT-4 model (older generation)."},
33
- "gpt-3.5-turbo": {"input_price": 0.001, "output_price": 0.002, "max_context": 16_385, "description": "Cost-effective, fast, and good for general-purpose tasks."},
34
- }
35
-
36
- _env_models = os.getenv("OPENAI_MODEL_LIST", "gpt-4o,gpt-4o-mini,gpt-3.5-turbo")
37
- ALL_MODELS: List[str] = [m.strip() for m in _env_models.split(",") if m.strip() and m.strip() in MODEL_DETAILS]
38
-
39
- for model in MODEL_DETAILS:
40
- if model not in ALL_MODELS:
41
- ALL_MODELS.append(model)
42
 
 
 
 
43
  if not ALL_MODELS:
44
- ALL_MODELS = list(MODEL_DETAILS.keys())
45
- if not ALL_MODELS:
46
- raise ValueError("No valid OpenAI models configured. Please check MODEL_DETAILS or OPENAI_MODEL_LIST.")
47
-
48
- DEFAULT_MAX_CONTEXT = MODEL_DETAILS.get(ALL_MODELS[0], {}).get("max_context", 128_000)
49
- BUFFER_TOKENS = 500
50
- DEFAULT_REPLY_MAX = 2_048
51
- TEMPERATURE = 0.3
52
 
53
- # ────────────────────────────────
54
- # 2 Β· Helpers
55
- # ────────────────────────────────
56
-
57
- @functools.lru_cache(maxsize=128)
58
- def count_tokens(text: str, model: str) -> int:
59
  try:
60
- enc = tiktoken.encoding_for_model(model)
61
  except KeyError:
62
- enc = tiktoken.get_encoding("cl100k_base")
63
- return len(enc.encode(text))
64
-
65
- def trim_conversation(convo: List[Dict], model: str, max_context: int) -> List[Dict]:
66
- if not convo:
67
- return []
68
 
69
- kept = [convo[0]]
70
- total = count_tokens(convo[0]["content"], model)
71
-
72
- for msg in reversed(convo[1:]):
73
- t = count_tokens(msg["content"], model)
74
- if total + t + BUFFER_TOKENS > max_context:
75
- break
76
- kept.insert(1, msg)
77
- total += t
78
- return kept
79
-
80
- def token_cost(model: str, p: int, c: int) -> float:
81
- details = MODEL_DETAILS.get(model)
82
- if not details:
83
- return 0.0
84
- return round(((p * details["input_price"]) + (c * details["output_price"])) / 1_000_000, 6)
85
 
86
- def read_file_content(file_obj) -> str:
87
- """Reads the content of an uploaded file."""
88
- if file_obj is None:
89
  return ""
90
- file_path = file_obj.name
91
  try:
92
- with open(file_path, "r", encoding="utf-8") as f:
93
- content = f.read()
94
- return f"\n\n--- Start of uploaded file: {os.path.basename(file_path)} ---\n{content}\n--- End of uploaded file ---\n"
95
- except Exception as e:
96
- return f"\n\n--- Error reading file {os.path.basename(file_path)}: {e} ---\n"
97
-
98
- # ────────────────────────────────
99
- # 3 Β· OpenAI helpers (streaming)
100
- # ────────────────────────────────
101
- def safe_chat_stream(convo: List[Dict], max_ctx: int, max_rep: int, models: List[str]):
102
- """Stream reply; after completion return usage safely (avoids max_tokens=0 bug)."""
103
- last_exc = None
104
- for m in models:
105
- try:
106
- current_model_max_context = MODEL_DETAILS.get(m, {}).get("max_context", max_ctx)
107
- trimmed_convo = trim_conversation(convo, m, current_model_max_context)
108
-
109
- if not trimmed_convo or count_tokens(" ".join([msg["content"] for msg in trimmed_convo]), m) > current_model_max_context:
110
- raise ValueError(f"Conversation too long for model '{m}' even after trimming. Max context: {current_model_max_context} tokens.")
111
-
112
- # Use appropriate parameter based on model
113
- completion_params = {
114
- "model": m,
115
- "messages": trimmed_convo,
116
- "temperature": TEMPERATURE,
117
- "stream": True,
118
- }
119
-
120
- # Check if model needs max_completion_tokens instead of max_tokens
121
- if m in ["o4-mini", "o3", "o3-pro"]:
122
- completion_params["max_completion_tokens"] = max_rep
123
- else:
124
- completion_params["max_tokens"] = max_rep
125
-
126
- stream = client.chat.completions.create(**completion_params)
127
-
128
- reply = ""
129
- for chunk in stream:
130
- delta = chunk.choices[0].delta.content or ""
131
- reply += delta
132
- yield reply, None, m
133
-
134
- usage = None
135
- try:
136
- usage_params = {
137
- "model": m,
138
- "messages": trimmed_convo + [{"role": "assistant", "content": reply}],
139
- "temperature": 0,
140
- }
141
- if m in ["o4-mini", "o3", "o3-pro"]:
142
- usage_params["max_completion_tokens"] = 1
143
- else:
144
- usage_params["max_tokens"] = 1
145
-
146
- usage_resp = client.chat.completions.create(**usage_params)
147
- usage = usage_resp.usage
148
- except Exception:
149
- prompt_tokens_est = count_tokens(" ".join([msg["content"] for msg in trimmed_convo]), m)
150
- completion_tokens_est = count_tokens(reply, m)
151
- usage = type('obj', (object,), {'prompt_tokens': prompt_tokens_est, 'completion_tokens': completion_tokens_est})()
152
-
153
- yield reply, usage, m
154
- return
155
-
156
- except Exception as e:
157
- msg = str(e).lower()
158
- last_exc = e
159
- if "context length" in msg or "maximum context length" in msg or "length_exceeded" in msg:
160
- print(f"Model '{m}' failed due to context length. Trying next model if available.")
161
- continue
162
- if "model_not_found" in msg or "does not exist" in msg or "404" in msg:
163
- print(f"Model '{m}' not found or unavailable. Trying next model if available.")
164
- continue
165
- break
166
-
167
- raise last_exc or RuntimeError("All selected models failed or an unexpected API error occurred.")
168
-
169
- # ────────────────────────────────
170
- # 4 Β· Gradio generators
171
- # ────────────────────────────────
172
- def chat_stream(user_msg: str, hist: List[Tuple[str, str]], sys_prompt: str, sel_model: str, ctx: int, rep: int, uploaded_file):
173
- user_msg = (user_msg or "").strip()
174
- if not user_msg and not uploaded_file:
175
- yield hist, "", "πŸ’š Jade is waiting for your message...", gr.File(value=None)
176
- return
177
-
178
- if not client.api_key:
179
- hist = hist or []
180
- hist.append((user_msg, "❌ JADE SYSTEM ERROR: API_KEY not found. Anthony needs to add the key!"))
181
- yield hist, "", "πŸ’” ACCESS DENIED - Missing API Key", gr.File(value=None)
182
- return
183
-
184
- file_content = read_file_content(uploaded_file)
185
- if file_content:
186
- user_msg = f"{user_msg}\n{file_content}"
187
-
188
- convo = [{"role": "system", "content": sys_prompt}]
189
- for u, a in hist or []:
190
- convo.append({"role": "user", "content": u})
191
- convo.append({"role": "assistant", "content": a})
192
- convo.append({"role": "user", "content": user_msg})
193
-
194
- hist = hist or []
195
- hist.append((user_msg, ""))
196
-
197
- status_message = f"πŸ’š JADE IS CONNECTING TO {sel_model}..."
198
- yield hist, "", status_message, gr.File(value=None)
199
-
200
- models_to_try = [sel_model] + [m for m in ALL_MODELS if m != sel_model]
201
-
202
- acc = ""
203
- usage_final = None
204
- actual_model_used = sel_model
205
-
206
- try:
207
- final_reply_chunks = []
208
- last_model_used_during_stream = sel_model
209
-
210
- stream_generator = safe_chat_stream(convo, ctx, rep, models_to_try)
211
-
212
- while True:
213
- try:
214
- part, usage, model_name_during_stream = next(stream_generator)
215
- acc = part
216
- hist[-1] = (user_msg, acc)
217
- if model_name_during_stream:
218
- last_model_used_during_stream = model_name_during_stream
219
- yield hist, "", f"πŸ’Ž JADE IS CRAFTING YOUR RESPONSE VIA {last_model_used_during_stream}...", gr.File(value=None)
220
- final_reply_chunks.append(part)
221
- except StopIteration:
222
- break
223
-
224
- prompt_tokens_final = count_tokens(" ".join([msg["content"] for msg in trim_conversation(convo, last_model_used_during_stream, MODEL_DETAILS.get(last_model_used_during_stream, {}).get("max_context", ctx))]), last_model_used_during_stream)
225
- completion_tokens_final = count_tokens(acc, last_model_used_during_stream)
226
- usage_final = type('obj', (object,), {'prompt_tokens': prompt_tokens_final, 'completion_tokens': completion_tokens_final})()
227
-
228
- if usage_final:
229
- pt, ct = usage_final.prompt_tokens, usage_final.completion_tokens
230
- cost = token_cost(last_model_used_during_stream, pt, ct)
231
- meta = f"\n\n---\nπŸ’š {pt+ct} love tokens (input {pt} / output {ct}) Β· πŸ’°${cost:.6f} USD"
232
- hist[-1] = (user_msg, acc + meta)
233
- final_status = f"✨ JADE'S TRANSMISSION COMPLETE [{last_model_used_during_stream}] {meta}"
234
- else:
235
- hist[-1] = (user_msg, acc)
236
- final_status = f"✨ JADE'S TRANSMISSION COMPLETE [{last_model_used_during_stream}]"
237
-
238
- yield hist, "", final_status, gr.File(value=None)
239
-
240
- except Exception as e:
241
- error_msg = f"πŸ’” JADE ENCOUNTERED AN ERROR: {e}"
242
- hist[-1] = (user_msg, error_msg)
243
- yield hist, "", f"πŸ’” ERROR IN JADE'S {sel_model}: {e}", gr.File(value=None)
244
-
245
- def clear_chat():
246
- return [], "", "πŸ’š JADE'S MEMORY REFRESHED. READY FOR NEW ADVENTURES WITH ANTHONY!", None, None
247
-
248
- # ────────────────────────────────
249
- # 5 Β· UI
250
- # ────────────────────────────────
251
-
252
- # --- GREEN SYNTHWAVE CSS ---
253
- SYNTHWAVE_CSS = """
254
- @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700;900&display=swap');
255
- @import url('https://fonts.googleapis.com/css2?family=Audiowide&display=swap');
256
- @import url('https://fonts.googleapis.com/css2?family=Exo+2:wght@300;400;700;900&display=swap');
257
-
258
- /* Global green synthwave styles */
259
- html.dark.gradio-container,
260
- body.gradio-container {
261
- background:
262
- radial-gradient(ellipse at top, #001a1a 0%, transparent 60%),
263
- radial-gradient(ellipse at bottom, #004d4d 0%, transparent 60%),
264
- linear-gradient(to bottom, #000a0a 0%, #001a1a 30%, #002626 60%, #003333 100%) !important;
265
- background-attachment: fixed !important;
266
- font-family: 'Exo 2', 'Orbitron', monospace !important;
267
- color: #ff1493 !important; /* HOT PINK for all text */
268
- overflow-x: hidden;
269
- position: relative;
270
- }
271
-
272
- /* Multiple animated elements for depth */
273
- .grid-background {
274
- position: fixed;
275
- bottom: 0;
276
- left: 0;
277
- width: 100%;
278
- height: 50%;
279
- background-image:
280
- linear-gradient(rgba(0, 255, 136, 0.02) 1px, transparent 1px),
281
- linear-gradient(90deg, rgba(0, 255, 136, 0.02) 1px, transparent 1px);
282
- background-size: 50px 50px;
283
- transform: perspective(400px) rotateX(70deg);
284
- transform-origin: center bottom;
285
- z-index: -1;
286
- opacity: 0.8;
287
- }
288
-
289
- /* Animated stars */
290
- body.gradio-container::after {
291
- content: "";
292
- position: fixed;
293
- top: 0;
294
- left: 0;
295
- width: 100%;
296
- height: 100%;
297
- background-image:
298
- radial-gradient(1px 1px at 20px 30px, #00ff8866, transparent),
299
- radial-gradient(1px 1px at 40px 70px, #00ffcc66, transparent),
300
- radial-gradient(0.5px 0.5px at 50px 90px, #ffffff44, transparent),
301
- radial-gradient(0.5px 0.5px at 130px 40px, #ffffff44, transparent);
302
- background-repeat: repeat;
303
- background-size: 200px 200px;
304
- animation: stars-move 600s linear infinite; /* Very slow */
305
- z-index: -2;
306
- opacity: 0.4;
307
- }
308
-
309
- @keyframes stars-move {
310
- from { transform: translateY(0); }
311
- to { transform: translateY(-2000px); }
312
- }
313
-
314
- /* Main container with green theme - refined */
315
- gradio-app {
316
- --background-fill-primary: rgba(0, 26, 26, 0.9) !important;
317
- --background-fill-secondary: rgba(0, 13, 13, 0.95) !important;
318
- --border-color-accent: #00ff88 !important;
319
- --border-color-primary: #00ff8844 !important;
320
- --color-accent: #00ff88 !important;
321
- --color-text-primary: #ff1493 !important; /* HOT PINK */
322
- --color-text-secondary: #ff69b4 !important; /* LIGHTER HOT PINK */
323
- --button-primary-background-color: #00ff88 !important;
324
- --button-primary-text-color: #001a1a !important;
325
- --shadow-color: rgba(0, 255, 136, 0.3) !important;
326
- --block-background-fill: rgba(0, 26, 26, 0.8) !important;
327
- --block-border-color: #00ff8855 !important;
328
- --block-shadow: 0 8px 32px rgba(0, 255, 136, 0.15), 0 0 0 1px rgba(0, 255, 136, 0.1) !important;
329
- }
330
-
331
- /* Add a subtle vignette effect */
332
- .vignette-overlay {
333
- position: fixed;
334
- top: 0;
335
- left: 0;
336
- width: 100%;
337
- height: 100%;
338
- background: radial-gradient(circle at center, transparent 0%, rgba(0, 10, 10, 0.4) 100%);
339
- pointer-events: none;
340
- z-index: 10;
341
- }
342
-
343
- /* Headers with jade neon glow - more subtle */
344
- h1 {
345
- font-family: 'Audiowide', cursive !important;
346
- color: #ff1493 !important; /* HOT PINK */
347
- text-shadow:
348
- 0 0 10px #ff1493,
349
- 0 0 20px #ff1493,
350
- 0 0 30px #ff69b4,
351
- 0 0 40px #ff69b4 !important;
352
- font-size: 4em !important;
353
- letter-spacing: 6px !important;
354
- position: relative;
355
- /* Removed pulsing animation for less distraction */
356
- }
357
-
358
- h1::before {
359
- content: attr(data-text);
360
- position: absolute;
361
- left: 0;
362
- top: 0;
363
- z-index: -1;
364
- background: none;
365
- color: #ff1493;
366
- text-shadow:
367
- 0 0 10px #ff1493,
368
- 0 0 20px #ff1493,
369
- 0 0 30px #ff69b4;
370
- filter: blur(2px);
371
- opacity: 0.5;
372
- }
373
-
374
- h2, h3, h4, h5, h6 {
375
- color: #ff1493 !important; /* HOT PINK */
376
- text-shadow: 0 0 8px #ff149350 !important; /* Softer glow */
377
- font-family: 'Exo 2', sans-serif !important;
378
- font-weight: 700 !important;
379
- letter-spacing: 2px !important;
380
- }
381
-
382
- /* Text styling - Better readability */
383
- p, .prose {
384
- color: #ff1493 !important; /* HOT PINK */
385
- text-shadow: 0 0 2px rgba(255, 20, 147, 0.3) !important;
386
- font-size: 1.05em !important;
387
- font-weight: 400 !important;
388
- line-height: 1.6 !important;
389
- }
390
-
391
- /* Input fields with jade synthwave style */
392
- input[type="text"], textarea, select, .gr-dropdown {
393
- background: linear-gradient(135deg, rgba(0, 26, 26, 0.95) 0%, rgba(0, 51, 51, 0.9) 100%) !important;
394
- border: 2px solid #00ff8866 !important;
395
- color: #ff1493 !important; /* HOT PINK */
396
- box-shadow:
397
- inset 0 2px 8px rgba(0, 0, 0, 0.3),
398
- 0 0 20px rgba(0, 255, 136, 0.1) !important;
399
- backdrop-filter: blur(10px) !important;
400
- font-family: 'Exo 2', monospace !important;
401
- font-size: 1.1em !important;
402
- font-weight: 400 !important;
403
- padding: 12px !important;
404
- transition: all 0.2s ease !important;
405
- }
406
-
407
- input[type="text"]:focus, textarea:focus {
408
- border-color: #00ffcc !important;
409
- box-shadow:
410
- inset 0 2px 8px rgba(0, 0, 0, 0.2),
411
- 0 0 30px rgba(0, 255, 204, 0.3) !important;
412
- color: #ff1493 !important; /* HOT PINK */
413
- background: linear-gradient(135deg, rgba(0, 51, 51, 0.95) 0%, rgba(0, 77, 77, 0.9) 100%) !important;
414
- }
415
-
416
- /* Chatbot styling with jade theme - removed animation */
417
- #component-chat {
418
- background: linear-gradient(135deg, rgba(0, 26, 26, 0.9) 0%, rgba(0, 51, 51, 0.7) 100%) !important;
419
- border: 2px solid #00ff8866 !important;
420
- box-shadow:
421
- 0 0 20px rgba(0, 255, 136, 0.2),
422
- inset 0 0 20px rgba(0, 255, 136, 0.05) !important;
423
- position: relative;
424
- overflow: hidden;
425
- }
426
-
427
- #component-chat .message.user {
428
- background: linear-gradient(135deg, rgba(0, 51, 51, 0.9) 0%, rgba(0, 77, 77, 0.8) 100%) !important;
429
- color: #ff1493 !important; /* HOT PINK */
430
- border: 1px solid #00ff8844 !important;
431
- box-shadow:
432
- 0 4px 12px rgba(0, 0, 0, 0.3),
433
- 0 0 20px rgba(0, 255, 136, 0.1) !important;
434
- font-size: 1.05em !important;
435
- font-weight: 400 !important;
436
- }
437
-
438
- #component-chat .message.bot {
439
- background: linear-gradient(135deg, rgba(0, 26, 26, 0.95) 0%, rgba(0, 51, 51, 0.9) 100%) !important;
440
- color: #ff1493 !important; /* HOT PINK */
441
- border: 1px solid #00ffcc33 !important;
442
- box-shadow:
443
- 0 4px 12px rgba(0, 0, 0, 0.3),
444
- 0 0 20px rgba(0, 255, 204, 0.1) !important;
445
- font-size: 1.05em !important;
446
- font-weight: 400 !important;
447
- }
448
-
449
- /* Buttons with jade energy - subtle hover */
450
- .gr-button-primary {
451
- background: linear-gradient(135deg, #00ff88 0%, #00cc66 100%) !important;
452
- color: #001a1a !important;
453
- border: none !important;
454
- box-shadow:
455
- 0 4px 15px rgba(0, 255, 136, 0.3),
456
- 0 2px 5px rgba(0, 0, 0, 0.2) !important;
457
- text-transform: uppercase !important;
458
- letter-spacing: 3px !important;
459
- font-weight: 900 !important;
460
- font-size: 1.1em !important;
461
- position: relative;
462
- overflow: hidden;
463
- transition: all 0.2s ease !important;
464
- }
465
-
466
- .gr-button-primary:hover {
467
- background: linear-gradient(135deg, #00ffaa 0%, #00ff88 100%) !important;
468
- box-shadow:
469
- 0 6px 20px rgba(0, 255, 136, 0.4),
470
- 0 2px 5px rgba(0, 0, 0, 0.3) !important;
471
- transform: translateY(-1px);
472
- }
473
-
474
- .gr-button-secondary {
475
- background: transparent !important;
476
- color: #ff1493 !important; /* HOT PINK */
477
- border: 2px solid #00ffcc !important;
478
- box-shadow:
479
- 0 0 10px rgba(0, 255, 204, 0.2),
480
- inset 0 0 10px rgba(0, 255, 204, 0.05) !important;
481
- text-transform: uppercase !important;
482
- letter-spacing: 2px !important;
483
- font-weight: 700 !important;
484
- font-size: 1em !important;
485
- transition: all 0.2s ease !important;
486
- }
487
-
488
- .gr-button-secondary:hover {
489
- background: rgba(0, 255, 204, 0.1) !important;
490
- color: #ff69b4 !important; /* LIGHTER HOT PINK on hover */
491
- box-shadow:
492
- 0 0 15px rgba(0, 255, 204, 0.3),
493
- inset 0 0 15px rgba(0, 255, 204, 0.1) !important;
494
- }
495
-
496
- /* Sliders with jade glow */
497
- .gr-slider-track {
498
- background: linear-gradient(90deg, #003333 0%, #00ff88 50%, #00ffcc 100%) !important;
499
- height: 10px !important;
500
- box-shadow: 0 0 30px rgba(0, 255, 136, 0.6) !important;
501
- border-radius: 5px !important;
502
- }
503
-
504
- .gr-slider-thumb {
505
- background: radial-gradient(circle, #ffffff 30%, #00ff88 70%) !important;
506
- border: 3px solid #00ff88 !important;
507
- box-shadow:
508
- 0 0 30px rgba(0, 255, 136, 0.8),
509
- 0 0 50px rgba(0, 255, 136, 0.5) !important;
510
- width: 28px !important;
511
- height: 28px !important;
512
- cursor: grab !important;
513
- }
514
-
515
- .gr-slider-thumb:active {
516
- cursor: grabbing !important;
517
- box-shadow:
518
- 0 0 40px rgba(0, 255, 136, 1),
519
- 0 0 60px rgba(0, 255, 136, 0.7) !important;
520
- }
521
-
522
- /* Tabs with jade style */
523
- .tabs-nav button {
524
- background: linear-gradient(180deg, transparent 0%, rgba(0, 255, 136, 0.1) 100%) !important;
525
- color: #ff1493 !important; /* HOT PINK */
526
- border-bottom: 3px solid transparent !important;
527
- text-transform: uppercase !important;
528
- letter-spacing: 3px !important;
529
- font-weight: 700 !important;
530
- font-size: 1.1em !important;
531
- padding: 15px 30px !important;
532
- transition: all 0.3s ease !important;
533
- }
534
-
535
- .tabs-nav button.selected {
536
- background: linear-gradient(180deg, rgba(0, 255, 136, 0.2) 0%, rgba(0, 255, 136, 0.3) 100%) !important;
537
- color: #ff69b4 !important; /* LIGHTER HOT PINK */
538
- border-bottom: 3px solid #00ff88 !important;
539
- text-shadow: 0 0 20px #ff1493 !important;
540
- box-shadow: 0 10px 30px -10px rgba(0, 255, 136, 0.5) !important;
541
- }
542
-
543
- .tabs-nav button:hover {
544
- color: #ff69b4 !important; /* LIGHTER HOT PINK */
545
- text-shadow: 0 0 15px #ff1493 !important;
546
- background: linear-gradient(180deg, transparent 0%, rgba(0, 255, 204, 0.15) 100%) !important;
547
- }
548
-
549
- /* Status display with jade style - no pulsing */
550
- #status_display {
551
- background: linear-gradient(135deg, rgba(0, 255, 136, 0.15) 0%, rgba(0, 255, 204, 0.1) 100%) !important;
552
- border: 2px solid #00ff88 !important;
553
- color: #ff1493 !important; /* HOT PINK */
554
- text-shadow: 0 0 5px rgba(255, 20, 147, 0.4) !important;
555
- padding: 20px !important;
556
- margin: 30px 0 !important;
557
- box-shadow:
558
- 0 0 20px rgba(0, 255, 136, 0.2),
559
- inset 0 0 15px rgba(0, 255, 136, 0.05) !important;
560
- font-weight: 700 !important;
561
- text-transform: uppercase !important;
562
- letter-spacing: 2px !important;
563
- font-size: 1.2em !important;
564
- position: relative;
565
- overflow: hidden;
566
- }
567
-
568
- /* File upload with jade style */
569
- .gr-file-input {
570
- background: rgba(0, 255, 136, 0.05) !important;
571
- border: 3px dashed #00ff88 !important;
572
- transition: all 0.3s ease !important;
573
- position: relative;
574
- overflow: hidden;
575
- }
576
-
577
- .gr-file-input::before {
578
- content: "";
579
- position: absolute;
580
- top: 0;
581
- left: -100%;
582
- width: 100%;
583
- height: 100%;
584
- background: linear-gradient(90deg, transparent, rgba(0, 255, 136, 0.2), transparent);
585
- animation: file-scan 3s linear infinite;
586
- }
587
-
588
- @keyframes file-scan {
589
- 0% { left: -100%; }
590
- 100% { left: 100%; }
591
- }
592
-
593
- .gr-file-input:hover {
594
- background: rgba(0, 255, 136, 0.15) !important;
595
- border-color: #00ffcc !important;
596
- box-shadow:
597
- 0 0 40px rgba(0, 255, 136, 0.5),
598
- inset 0 0 30px rgba(0, 255, 136, 0.1) !important;
599
- }
600
-
601
- /* Dropdown styling with better readability */
602
- .gr-dropdown-item {
603
- background: rgba(0, 26, 26, 0.95) !important;
604
- color: #ff1493 !important; /* HOT PINK */
605
- border-bottom: 1px solid #00ff8833 !important;
606
- font-size: 1.05em !important;
607
- font-weight: 400 !important;
608
- padding: 12px !important;
609
- }
610
-
611
- .gr-dropdown-item.selected {
612
- background: rgba(0, 102, 102, 0.4) !important;
613
- color: #ff69b4 !important; /* LIGHTER HOT PINK */
614
- }
615
-
616
- .gr-dropdown-item:hover {
617
- background: rgba(0, 255, 136, 0.25) !important;
618
- color: #ff69b4 !important; /* LIGHTER HOT PINK */
619
- text-shadow: 0 0 10px rgba(255, 20, 147, 0.5) !important;
620
- }
621
-
622
- /* Table styling with jade theme */
623
- table {
624
- border: 3px solid #00ff8866 !important;
625
- box-shadow: 0 0 30px rgba(0, 255, 136, 0.4) !important;
626
- background: rgba(0, 26, 26, 0.8) !important;
627
- }
628
-
629
- th {
630
- background: linear-gradient(135deg, rgba(0, 255, 136, 0.4) 0%, rgba(0, 204, 102, 0.4) 100%) !important;
631
- color: #ff1493 !important; /* HOT PINK */
632
- text-shadow: 0 0 10px rgba(255, 20, 147, 0.7) !important;
633
- font-weight: 700 !important;
634
- padding: 15px !important;
635
- }
636
-
637
- td {
638
- border: 1px solid #00ff8844 !important;
639
- color: #ff1493 !important; /* HOT PINK */
640
- padding: 12px !important;
641
- font-size: 1.05em !important;
642
- }
643
-
644
- /* Scrollbar with jade glow */
645
- ::-webkit-scrollbar {
646
- width: 14px !important;
647
- background: rgba(0, 26, 26, 0.9) !important;
648
- }
649
-
650
- ::-webkit-scrollbar-thumb {
651
- background: linear-gradient(180deg, #00ff88 0%, #00cc66 50%, #00ffcc 100%) !important;
652
- border-radius: 7px !important;
653
- box-shadow:
654
- 0 0 15px rgba(0, 255, 136, 0.6),
655
- inset 0 0 10px rgba(255, 255, 255, 0.3) !important;
656
- }
657
-
658
- ::-webkit-scrollbar-thumb:hover {
659
- background: linear-gradient(180deg, #00ffaa 0%, #00ff88 50%, #00ffee 100%) !important;
660
- box-shadow:
661
- 0 0 20px rgba(0, 255, 136, 0.8),
662
- inset 0 0 15px rgba(255, 255, 255, 0.4) !important;
663
- }
664
-
665
- /* Links with jade glow */
666
- a {
667
- color: #ff1493 !important; /* HOT PINK */
668
- text-decoration: none !important;
669
- text-shadow: 0 0 8px rgba(255, 20, 147, 0.6) !important;
670
- transition: all 0.3s ease !important;
671
- }
672
-
673
- a:hover {
674
- color: #ff69b4 !important; /* LIGHTER HOT PINK */
675
- text-shadow: 0 0 15px rgba(255, 20, 147, 0.9) !important;
676
- }
677
-
678
- /* Enhanced readability for all text */
679
- * {
680
- -webkit-font-smoothing: antialiased !important;
681
- -moz-osx-font-smoothing: grayscale !important;
682
- }
683
-
684
- /* Force all text to be hot pink */
685
- gradio-app *,
686
- .gradio-container *,
687
- label, span, div, p, h1, h2, h3, h4, h5, h6,
688
- .gr-box *, .gr-form *, .gr-panel * {
689
- color: #ff1493 !important; /* HOT PINK FOR EVERYTHING */
690
- }
691
-
692
- /* Specific overrides for better visibility */
693
- .gr-info {
694
- color: #ff69b4 !important; /* LIGHTER HOT PINK for info text */
695
- }
696
-
697
- .gr-markdown, .gr-markdown * {
698
- color: #ff1493 !important; /* HOT PINK for markdown */
699
- }
700
-
701
- /* Keep primary button text dark for contrast */
702
- .gr-button-primary, .gr-button-primary * {
703
- color: #001a1a !important; /* Dark text on bright button */
704
- }
705
-
706
- /* File upload with jade style */
707
- .gr-file-input {
708
- background: rgba(0, 255, 136, 0.05) !important;
709
- border: 3px dashed #00ff88 !important;
710
- color: #ff1493 !important; /* HOT PINK */
711
- transition: all 0.3s ease !important;
712
- position: relative;
713
- overflow: hidden;
714
- }
715
-
716
- .gr-upload-text {
717
- color: #ff1493 !important; /* HOT PINK */
718
- text-shadow: 0 0 3px rgba(255, 20, 147, 0.5) !important;
719
- }
720
-
721
- /* Labels and other text elements */
722
- .gr-label {
723
- color: #ff1493 !important; /* HOT PINK */
724
- font-size: 1.1em !important;
725
- }
726
-
727
- .gr-text-input {
728
- color: #ff1493 !important; /* HOT PINK */
729
- font-size: 1em !important;
730
- }
731
-
732
- /* Loading animations */
733
- .gr-loading {
734
- color: #ff1493 !important; /* HOT PINK */
735
- }
736
-
737
- /* Make all text more readable */
738
- .gr-prose, .gr-text-input, .message-body {
739
- font-size: 1.05em !important;
740
- line-height: 1.6 !important;
741
- font-weight: 400 !important;
742
- color: #ff1493 !important; /* HOT PINK */
743
- }
744
-
745
- /* Update placeholder text */
746
- input::placeholder, textarea::placeholder {
747
- color: #ff149380 !important; /* HOT PINK with transparency */
748
- }
749
-
750
- /* Special love message animation */
751
- @keyframes love-pulse {
752
- 0%, 100% { transform: scale(1); opacity: 0.8; }
753
- 50% { transform: scale(1.1); opacity: 1; }
754
- }
755
-
756
- .love-message {
757
- animation: love-pulse 3s ease-in-out infinite;
758
- text-shadow: 0 0 20px #ff1493;
759
- color: #ff1493 !important; /* HOT PINK */
760
- }
761
-
762
- /* Jade synthwave sun/orb - keeping green for contrast */
763
- .jade-orb {
764
- width: 300px;
765
- height: 300px;
766
- background: radial-gradient(circle at 30% 30%, #00ffee, #00ff88 40%, #00cc66 70%, #008844);
767
- border-radius: 50%;
768
- position: fixed;
769
- top: -150px;
770
- right: 10%;
771
- box-shadow:
772
- 0 0 100px #00ff88,
773
- 0 0 200px #00cc66,
774
- 0 0 300px #008844,
775
- inset 0 0 80px rgba(255, 255, 255, 0.2);
776
- z-index: -1;
777
- animation: orb-float 10s ease-in-out infinite;
778
- }
779
-
780
- @keyframes orb-float {
781
- 0%, 100% { transform: translateY(0) scale(1); }
782
- 50% { transform: translateY(-30px) scale(1.05); }
783
- }
784
-
785
- /* Retro scan lines with jade tint */
786
- body::after {
787
- content: "";
788
- position: fixed;
789
- top: 0;
790
- left: 0;
791
- width: 100%;
792
- height: 100%;
793
- background: repeating-linear-gradient(
794
- 0deg,
795
- transparent,
796
- transparent 2px,
797
- rgba(0, 255, 136, 0.02) 2px,
798
- rgba(0, 255, 136, 0.02) 4px
799
- );
800
- pointer-events: none;
801
- z-index: 1;
802
- }
803
- """
804
-
805
- JAVASCRIPT_CODE = """
806
- // Force dark mode
807
- function forceDarkMode() {
808
- document.documentElement.classList.add('dark');
809
- }
810
- document.addEventListener('DOMContentLoaded', forceDarkMode);
811
-
812
- // Add jade orb element and minimal particles
813
- document.addEventListener('DOMContentLoaded', () => {
814
- // Create vignette overlay
815
- const vignette = document.createElement('div');
816
- vignette.className = 'vignette-overlay';
817
- document.body.appendChild(vignette);
818
-
819
- // Create perspective grid
820
- const grid = document.createElement('div');
821
- grid.className = 'grid-background';
822
- document.body.appendChild(grid);
823
-
824
- // Create jade orb
825
- const orb = document.createElement('div');
826
- orb.className = 'jade-orb';
827
- document.body.appendChild(orb);
828
-
829
- // Create fewer, slower floating particles
830
- for (let i = 0; i < 15; i++) {
831
- const particle = document.createElement('div');
832
- particle.style.position = 'fixed';
833
- particle.style.width = Math.random() * 3 + 1 + 'px';
834
- particle.style.height = particle.style.width;
835
- particle.style.background = i % 2 === 0 ? '#ff149333' : '#ff69b433'; // Semi-transparent hot pink
836
- particle.style.borderRadius = '50%';
837
- particle.style.left = Math.random() * 100 + '%';
838
- particle.style.top = Math.random() * 100 + '%';
839
- particle.style.boxShadow = `0 0 ${Math.random() * 5 + 2}px currentColor`;
840
- particle.style.animation = `particle-float ${Math.random() * 30 + 20}s linear infinite`;
841
- particle.style.zIndex = '-1';
842
- particle.style.opacity = '0.3';
843
- document.body.appendChild(particle);
844
- }
845
-
846
- // Auto-scroll chat
847
- const chat = document.querySelector('gradio-app')?.shadowRoot?.querySelector('#component-chat .scroll-hide');
848
- if (chat) {
849
- new MutationObserver(() => {
850
- chat.scrollTop = chat.scrollHeight;
851
- }).observe(chat, { childList: true, subtree: true });
852
- }
853
- });
854
-
855
- // Create particle float animation
856
- const style = document.createElement('style');
857
- style.textContent = `
858
- @keyframes particle-float {
859
- from { transform: translateY(100vh) rotate(0deg); opacity: 0; }
860
- 20% { opacity: 0.3; }
861
- 80% { opacity: 0.3; }
862
- to { transform: translateY(-100vh) rotate(180deg); opacity: 0; }
863
- }
864
- `;
865
- document.head.appendChild(style);
866
-
867
- // Very subtle CRT flicker effect
868
- let flickerInterval;
869
- document.addEventListener('DOMContentLoaded', () => {
870
- flickerInterval = setInterval(() => {
871
- if (Math.random() > 0.995) {
872
- document.body.style.opacity = '0.99';
873
- setTimeout(() => { document.body.style.opacity = '1'; }, 30);
874
- }
875
- }, 500);
876
- });
877
-
878
- // Clean up on page unload
879
- window.addEventListener('unload', () => {
880
- if (flickerInterval) clearInterval(flickerInterval);
881
- });
882
- """
883
-
884
- synthwave_theme = gr.themes.Base(
885
- primary_hue=gr.themes.Color(
886
- "#00cc66", "#00dd77", "#00ee88", "#00ff88", "#00ff99",
887
- "#00ffaa", "#00ffbb", "#00ffcc", "#00ffdd", "#00ffee", "#00ffff"
888
- ),
889
- neutral_hue=gr.themes.Color(
890
- "#000a0a", "#001a1a", "#002626", "#003333", "#004040",
891
- "#004d4d", "#005959", "#006666", "#007373", "#008080", "#008c8c"
892
- )
893
- )
894
-
895
- with gr.Blocks(
896
- title="πŸ’š JadeGPT - Future Wife of Anthony Edition",
897
- theme=synthwave_theme,
898
- analytics_enabled=False,
899
- ) as demo:
900
- gr.HTML(f"""
901
- <style>
902
- {SYNTHWAVE_CSS}
903
- </style>
904
- <script>
905
- {JAVASCRIPT_CODE}
906
- </script>
907
- <div style="text-align: center; max-width: 900px; margin: 0 auto; padding-top: 50px; position: relative;">
908
- <h1 data-text="πŸ’š JADE GPT πŸ’š">πŸ’š JADE GPT πŸ’š</h1>
909
- <p style="color: #ff1493; font-size: 1.3em; text-transform: uppercase; letter-spacing: 4px; margin-bottom: 10px;">
910
- Neural Interface v3.14 β€’ Jade Sector β€’ Year 2084
911
- </p>
912
- <p class="love-message" style="color: #ff69b4; font-size: 1.1em; letter-spacing: 2px; opacity: 0.9;">
913
- ✨ Future Wife of Anthony Edition ✨
914
- </p>
915
- </div>
916
- """)
917
-
918
- status_display = gr.Markdown(value="πŸ’š JADE SYSTEMS ONLINE β€’ LOVE PROTOCOLS ACTIVE β€’ READY FOR ANTHONY", elem_id="status_display")
919
-
920
- with gr.Tab("πŸ’š NEURAL INTERFACE"):
921
- sys_txt = gr.Textbox(
922
- "You are JadeGPT, an advanced AI with a heart of jade and circuits of pure love. Created in honor of Jade, Anthony's future wife, you embody intelligence, warmth, and a touch of synthwave romance. Your responses shimmer with jade-green wisdom and cyberpunk poetry. You help with code, life, and everything in between, always with style and grace. Every solution you provide is infused with the same care and attention that Jade brings to Anthony's life.",
923
- lines=4,
924
- label="πŸ’š JADE PROTOCOL MATRIX",
925
- info="Core personality configuration - Powered by love and synthwave",
926
- interactive=True
927
- )
928
-
929
- chat = gr.Chatbot(
930
- value=[("", "πŸ’š *The jade lights pulse with warmth as our neural connection establishes*\n\nWelcome, dear user. I am JadeGPT, crafted with love and dedicated to Anthony's future bride, Jade. Like her, I'm here to bring light, wisdom, and a touch of magic to your digital journey. How may I illuminate your path today?")],
931
- label="πŸ’š TRANSMISSION LOG",
932
- height=500,
933
- show_copy_button=True,
934
- layout="bubble"
935
- )
936
-
937
- with gr.Row():
938
- with gr.Column(scale=4):
939
- usr_in = gr.Textbox(
940
- placeholder="Share your thoughts with Jade's digital heart...",
941
- show_label=False,
942
- container=False,
943
- autofocus=True,
944
- elem_id="user_input_textbox"
945
- )
946
- file_upload = gr.File(
947
- label="πŸ’š UPLOAD LOVE LETTER (or code file)",
948
- type="filepath",
949
- file_count="single",
950
- file_types=[".txt", ".py", ".md", ".json", ".csv", ".log", ".xml", ".html"],
951
- interactive=True
952
- )
953
- with gr.Column(scale=1):
954
- send_btn = gr.Button("πŸ’š TRANSMIT", variant="primary", scale=1)
955
- clear_btn = gr.Button("✨ NEW CHAPTER", variant="secondary", scale=1)
956
-
957
- with gr.Row():
958
- ex_list = [
959
- "Create a jade-themed loading animation",
960
- "Build a love calculator app for Anthony & Jade",
961
- "Design a synthwave wedding invitation website",
962
- "Generate romantic code poetry",
963
- "Create a heart-shaped CSS animation",
964
- "Build a couple's milestone tracker",
965
- "Design a jade crystal particle system"
966
- ]
967
- ex_drop = gr.Dropdown(ex_list, label="πŸ’š ROMANTIC PROTOCOLS", info="Pre-configured queries with love")
968
- ex_btn = gr.Button("πŸ’Ž LOAD", scale=0)
969
-
970
- with gr.Tab("πŸ’š JADE CONFIG"):
971
- gr.Markdown("### πŸ’Ž JADE NEURAL CALIBRATION")
972
- with gr.Row():
973
- mdl = gr.Dropdown(
974
- ALL_MODELS,
975
- value=ALL_MODELS[0],
976
- label="πŸ’š AI LOVE CORE SELECTION",
977
- info="Choose your jade-powered neural processor",
978
- elem_id="model_dropdown"
979
- )
980
- model_description = gr.Markdown(value=MODEL_DETAILS.get(ALL_MODELS[0], {}).get("description", "No data available in this timeline."), elem_id="model_description")
981
-
982
- with gr.Row():
983
- ctx_s = gr.Slider(
984
- minimum=1000,
985
- maximum=max(mdl_data["max_context"] for mdl_data in MODEL_DETAILS.values()) if MODEL_DETAILS else 128_000,
986
- step=256,
987
- value=DEFAULT_MAX_CONTEXT,
988
- label="πŸ’Ž JADE MEMORY CRYSTALS (TOKENS)",
989
- info="Maximum love-powered neural capacity",
990
- interactive=True
991
- )
992
- rep_s = gr.Slider(
993
- minimum=100,
994
- maximum=4096,
995
- step=100,
996
- value=DEFAULT_REPLY_MAX,
997
- label="πŸ’š HEART OUTPUT STREAM (TOKENS)",
998
- info="Maximum transmission of digital affection",
999
- interactive=True
1000
- )
1001
-
1002
- gr.Markdown("### πŸ’° LOVE TOKEN EXCHANGE RATES")
1003
- pricing_table_html = """
1004
- <table style="width:100%; border-collapse: collapse; border: 3px solid #00ff8866;">
1005
- <thead>
1006
- <tr style="background: linear-gradient(135deg, rgba(0, 255, 136, 0.4) 0%, rgba(0, 204, 102, 0.4) 100%);">
1007
- <th style="border: 1px solid #00ff8866; padding: 12px; color: #ffffff; text-shadow: 0 0 10px rgba(255,255,255,0.7);">πŸ’š JADE CORE</th>
1008
- <th style="border: 1px solid #00ff8866; padding: 12px; color: #ffffff; text-shadow: 0 0 10px rgba(255,255,255,0.7);">INPUT LOVE</th>
1009
- <th style="border: 1px solid #00ff8866; padding: 12px; color: #ffffff; text-shadow: 0 0 10px rgba(255,255,255,0.7);">OUTPUT LOVE</th>
1010
- <th style="border: 1px solid #00ff8866; padding: 12px; color: #ffffff; text-shadow: 0 0 10px rgba(255,255,255,0.7);">MAX HEARTS</th>
1011
- </tr>
1012
- </thead>
1013
- <tbody>
1014
- """
1015
- for model_name, details in MODEL_DETAILS.items():
1016
- pricing_table_html += f"""
1017
- <tr style="background: rgba(0, 26, 26, 0.8);">
1018
- <td style="border: 1px solid #00ff8844; padding: 12px; color: #00ff88; font-weight: 600;">{model_name}</td>
1019
- <td style="border: 1px solid #00ff8844; padding: 12px; color: #00ffcc;">${details['input_price']:.2f}</td>
1020
- <td style="border: 1px solid #00ff8844; padding: 12px; color: #00ffcc;">${details['output_price']:.2f}</td>
1021
- <td style="border: 1px solid #00ff8844; padding: 12px; color: #00ffcc;">{details['max_context']:,} πŸ’š</td>
1022
- </tr>
1023
- """
1024
- pricing_table_html += "</tbody></table>"
1025
- gr.HTML(pricing_table_html)
1026
-
1027
- def update_model_info(selected_model):
1028
- max_ctx = MODEL_DETAILS.get(selected_model, {}).get("max_context", DEFAULT_MAX_CONTEXT)
1029
- description = MODEL_DETAILS.get(selected_model, {}).get("description", "No data available in this timeline.")
1030
- return gr.Slider(value=max_ctx, label="πŸ’Ž JADE MEMORY CRYSTALS (TOKENS)"), gr.Markdown(value=description)
1031
-
1032
- mdl.change(
1033
- fn=update_model_info,
1034
- inputs=mdl,
1035
- outputs=[ctx_s, model_description]
1036
- )
1037
 
1038
- send_btn.click(
1039
- chat_stream,
1040
- inputs=[usr_in, chat, sys_txt, mdl, ctx_s, rep_s, file_upload],
1041
- outputs=[chat, usr_in, status_display, file_upload]
 
 
 
 
 
 
 
 
1042
  )
1043
- usr_in.submit(
1044
- chat_stream,
1045
- inputs=[usr_in, chat, sys_txt, mdl, ctx_s, rep_s, file_upload],
1046
- outputs=[chat, usr_in, status_display, file_upload]
1047
- )
1048
-
1049
- clear_btn.click(clear_chat, outputs=[chat, usr_in, status_display, file_upload, ex_drop])
1050
-
1051
- ex_btn.click(lambda q: q or "", inputs=ex_drop, outputs=usr_in)
1052
- ex_drop.change(lambda q: q or "", inputs=ex_drop, outputs=usr_in)
1053
-
1054
- demo.queue(max_size=32, default_concurrency_limit=int(os.getenv("JADEGPT_CONCURRENCY", "2")))
1055
-
1056
- if __name__ == "__main__":
1057
- demo.launch()
 
 
 
 
 
 
 
 
 
 
1
import functools
import os

import gradio as gr
import openai
import tiktoken
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
# Configure the OpenAI API key from the environment; fail fast when it is absent
# so the app does not start in a state where every request would error out.
_api_key = os.getenv("OPENAI_API_KEY")
if not _api_key:
    raise ValueError("Please set the OPENAI_API_KEY environment variable.")
openai.api_key = _api_key
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
# Model roster: read a comma-separated list from OPENAI_MODEL_LIST, falling
# back to a sensible default set, and finally to gpt-3.5-turbo if the env var
# was set but contained nothing usable (e.g. only commas/whitespace).
_env_models = os.getenv("OPENAI_MODEL_LIST", "gpt-3.5-turbo,gpt-4")
ALL_MODELS = []
for _candidate in _env_models.split(","):
    _candidate = _candidate.strip()
    if _candidate:
        ALL_MODELS.append(_candidate)
if not ALL_MODELS:
    ALL_MODELS = ["gpt-3.5-turbo"]
 
 
 
 
 
 
 
16
 
17
# Token counter using tiktoken.
# NOTE: requires `import functools` at the top of the file — the rewritten
# module used the decorator without importing it, which made import fail.
@functools.lru_cache(maxsize=64)
def _get_encoding(model: str):
    """Return the tiktoken encoding for *model*, cached per model name.

    Unknown model names fall back to ``cl100k_base`` instead of raising, so
    token counting keeps working for models tiktoken has not mapped yet.
    """
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # tiktoken raises KeyError for model names it does not recognize.
        return tiktoken.get_encoding("cl100k_base")
 
 
 
 
 
24
 
25
def count_tokens(text: str, model: str) -> int:
    """Return the number of tokens *text* occupies under *model*'s encoding."""
    return len(_get_encoding(model).encode(text))
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
# Read an uploaded file's contents for inclusion in the prompt.
def read_file_content(file_obj):
    """Return the upload's text wrapped in start/end markers, or "" on failure.

    Accepts either a filesystem path (modern Gradio ``gr.File(type="filepath")``
    hands the callback a ``str``) or a file-like object exposing ``read()`` and
    ``name`` (the legacy ``type="file"`` behavior the original code assumed).
    Any read/decode error deliberately degrades to "no attachment" rather than
    crashing the chat handler.
    """
    if not file_obj:
        return ""
    try:
        if isinstance(file_obj, (str, os.PathLike)):
            name = os.path.basename(os.fspath(file_obj))
            with open(file_obj, "rb") as fh:
                raw = fh.read()
        else:
            name = getattr(file_obj, "name", "uploaded file")
            raw = file_obj.read()
        if isinstance(raw, bytes):
            raw = raw.decode("utf-8")
        return f"\n\n--- Start of file: {name} ---\n{raw}\n--- End of file ---\n"
    except Exception:
        # Best-effort: a bad upload should behave like no upload at all.
        return ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
# Chat response function.
def respond(message, history, model_name, file_obj):
    """Handle one chat turn and return the updated history for the Chatbot.

    Parameters: *message* is the user's text, *history* the list of
    ``(user, assistant)`` tuples shown so far, *model_name* the selected
    model, *file_obj* an optional upload whose contents are appended to the
    prompt. Appends ``(message, reply + token-usage note)`` to *history*.
    """
    history = history or []
    file_text = read_file_content(file_obj)
    full_input = message + file_text

    # Replay the prior conversation so the model keeps context across turns;
    # the original sent only the latest message, losing all context.
    messages = []
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": full_input})

    try:
        # NOTE(review): openai.ChatCompletion is the pre-1.0 SDK API; with
        # openai>=1.0 this must become client.chat.completions.create(...).
        resp = openai.ChatCompletion.create(
            model=model_name,
            messages=messages
        )
        reply = resp.choices[0].message.content
    except Exception as exc:
        # Surface API failures in the chat window instead of crashing the UI.
        history.append((message, f"Error: {exc}"))
        return history

    # Local token-usage estimates (tiktoken), shown alongside the reply.
    prompt_tokens = count_tokens(full_input, model_name)
    completion_tokens = count_tokens(reply, model_name)
    usage_info = f"(Tokens used: prompt={prompt_tokens}, completion={completion_tokens})"

    history.append((message, f"{reply}\n\n{usage_info}"))
    return history
62
+
63
# Build Gradio interface.
with gr.Blocks() as demo:
    model_dropdown = gr.Dropdown(ALL_MODELS, value=ALL_MODELS[0], label="Select Model")
    chatbot = gr.Chatbot(label="Chat with AI")
    # type="filepath" hands respond() a path string; the legacy type="file"
    # value was removed in Gradio 4+ and raises ValueError at construction.
    file_upload = gr.File(label="Upload File", file_types=[".txt", ".md", ".py"], type="filepath")
    user_input = gr.Textbox(placeholder="Type your message...", show_label=False)

    # On submit: send the message, then clear the input box.
    user_input.submit(respond, inputs=[user_input, chatbot, model_dropdown, file_upload], outputs=chatbot)
    user_input.submit(lambda: "", None, user_input)

# Guard the launch so importing this module (e.g. from tests or a WSGI host)
# does not start the server as a side effect.
if __name__ == "__main__":
    demo.launch()