manvithll committed on
Commit
a35144e
·
verified ·
1 Parent(s): c0aa1d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -221
app.py CHANGED
@@ -1,35 +1,25 @@
1
- # yellowflash_final_complete.py
2
- # Single file: Gemini + LLaMA-4, 90/10 input, branded messages, clickable input, auto-scroll.
3
- # TESTING ONLY: Hardcoded API keys (do NOT publish)
4
 
5
- import time, requests, gradio as gr
 
 
6
 
7
- # ==========================
8
- # HARDCODED TEST KEYS (YOU GAVE THESE)
9
- # ==========================
10
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
11
- GROQ_KEY = "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"
12
-
13
- MODELS = {
14
- "Google Gemini 2.0 Flash": {
15
- "api_url": "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent",
16
- "api_key": GEMINI_KEY,
17
- "handler": "gemini",
18
- "description": "Google's advanced AI model"
19
- },
20
- "Meta LLaMA 4": {
21
- "api_url": "https://api.groq.com/openai/v1/chat/completions",
22
- "api_key": GROQ_KEY,
23
- "model_name": "meta-llama/llama-4-scout-17b-16e-instruct",
24
- "handler": "openai_compat",
25
- "description": "Meta's large language model"
26
- }
27
- }
28
-
29
- # ==========================
30
- # HTTP helper with retries
31
- # ==========================
32
- def post_with_retries(url, headers, payload, timeout=20, max_retries=3):
33
  for i in range(max_retries):
34
  try:
35
  r = requests.post(url, headers=headers, json=payload, timeout=timeout)
@@ -38,205 +28,84 @@ def post_with_retries(url, headers, payload, timeout=20, max_retries=3):
38
  except Exception as e:
39
  if i == max_retries - 1:
40
  raise
41
- time.sleep(1 + i)
42
- raise Exception("Max retries exceeded")
43
-
44
- # ==========================
45
- # Model callers
46
- # ==========================
47
- def call_gemini(api_url, api_key, history, user_message):
48
- headers = {"Content-Type": "application/json", "x-goog-api-key": api_key}
49
- # Build contents: we send the displayed text. You can strip "You: " prefixes if desired.
50
- contents = []
51
- for u_display, a_display in (history or []):
52
- contents.append({"role":"user","parts":[{"text":u_display}]})
53
- contents.append({"role":"model","parts":[{"text":a_display}]})
54
- contents.append({"role":"user","parts":[{"text":user_message}]})
55
- payload = {"contents": contents}
56
- r = post_with_retries(api_url, headers, payload)
57
- data = r.json()
58
- return data.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0].get("text", "")
59
-
60
- def call_openai_compat(api_url, api_key, model_name, history, user_message):
61
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
62
- messages = []
63
- for u_display, a_display in (history or []):
64
- messages.append({"role":"user","content":u_display})
65
- messages.append({"role":"assistant","content":a_display})
66
- messages.append({"role":"user","content":user_message})
67
- payload = {"model": model_name, "messages": messages}
68
- r = post_with_retries(api_url, headers, payload)
69
- data = r.json()
70
- if "choices" in data and data["choices"]:
71
- ch = data["choices"][0]
72
- # OpenAI-compatible shape:
73
- if isinstance(ch.get("message"), dict):
74
- return ch["message"].get("content", "")
75
- return ch.get("text", "")
76
- return str(data)
77
-
78
- # ==========================
79
- # Chat function (Gradio Chatbot expects list of tuples)
80
- # Returns [ (user_display, ai_display), ... ] , and "" (clear textbox)
81
- # ==========================
82
- def chat_fn_send(txt_value, history, model_choice):
83
- if not txt_value or not txt_value.strip():
84
- return history or [], ""
85
- cfg = MODELS.get(model_choice)
86
- if not cfg:
87
- return history or [], ""
88
  try:
89
- if cfg["handler"] == "gemini":
90
- reply = call_gemini(cfg["api_url"], cfg["api_key"], history or [], txt_value)
91
- else:
92
- reply = call_openai_compat(cfg["api_url"], cfg["api_key"], cfg["model_name"], history or [], txt_value)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  except Exception as e:
94
- reply = f"Error: {e}"
95
- # Brand messages for display, keep tuple format
96
- user_display = f"You: {txt_value}"
97
- ai_display = f"YellowFlash: {reply}"
98
- new_history = (history or []) + [(user_display, ai_display)]
99
- return new_history, "" # clear textbox
100
-
101
- # ==========================
102
- # CSS + JS (auto-scroll via MutationObserver)
103
- # ==========================
104
- CSS = r"""
105
- /* Base */
106
- body { margin:0; background:#0d0d0d; color:#fff; font-family:Inter, Arial, sans-serif; }
107
-
108
- /* Topbar */
109
- #topbar { background:#0f0f0f; padding:18px 24px; display:flex; align-items:center; justify-content:space-between; }
110
- #title { font-weight:700; font-size:20px; color:#ffcc33; }
111
-
112
- /* Chat wrapper and chatbox */
113
- .chat-wrapper { padding:18px; }
114
- .chatbox {
115
- background:#151515;
116
- border-radius:8px;
117
- border:1px solid #2b2b2b;
118
- height:68vh;
119
- width:90%;
120
- margin:0 auto;
121
- padding:16px;
122
- padding-bottom:220px; /* make space for the fixed input row */
123
- overflow:auto;
124
- position:relative;
125
- z-index:1;
126
- pointer-events:auto;
127
- }
128
-
129
- /* Input row: fixed, centered, 90/10 split, high z-index so it's clickable */
130
- #inputrow {
131
- position:fixed;
132
- left:0; right:0;
133
- bottom:32px;
134
- width:90%;
135
- margin:0 auto;
136
- display:flex;
137
- gap:12px;
138
- align-items:center;
139
- z-index:2147483647; /* top */
140
- pointer-events:auto;
141
- }
142
-
143
- /* Textbox (90%) */
144
- #msgbox {
145
- flex:9;
146
- background:#0e0e0e;
147
- color:#ddd;
148
- border-radius:12px;
149
- padding:14px;
150
- border:1px solid #262626;
151
- min-height:56px;
152
- resize:none;
153
- pointer-events:auto;
154
- font-size:15px;
155
- }
156
-
157
- /* Remove default label if present */
158
- #msgbox label { display:none !important; }
159
-
160
- /* Send button (10%) */
161
- #sendbtn {
162
- flex:1;
163
- background:#2cc3ff;
164
- color:#000;
165
- border-radius:12px;
166
- font-weight:800;
167
- height:56px;
168
- border:none;
169
- cursor:pointer;
170
- display:flex;
171
- align-items:center;
172
- justify-content:center;
173
- font-size:20px;
174
- pointer-events:auto;
175
- }
176
-
177
- /* neutralize Gradio bubble box interfering */
178
- .gradio-chatbot .message {
179
- background: transparent;
180
- border: none;
181
- padding: 0;
182
- color: inherit;
183
- pointer-events:auto;
184
- }
185
-
186
- /* Responsive adjustments */
187
- @media (max-width:900px) {
188
- .chatbox { width:96%; padding-bottom:260px; }
189
- #inputrow { width:96%; bottom:18px; }
190
- }
191
- """
192
 
193
- # JS: auto-scroll to bottom of chatbox whenever it changes
194
- # This MutationObserver will scroll the chat element to the bottom on new nodes.
195
- JS = """
196
- <script>
197
- (function(){
198
- const selector = ".chatbox"; // matches our chat element
199
- function setupObserver(){
200
- const el = document.querySelector(selector);
201
- if(!el) return setTimeout(setupObserver, 300);
202
- const obs = new MutationObserver(()=>{ el.scrollTop = el.scrollHeight; });
203
- obs.observe(el, { childList:true, subtree:true, characterData:true });
204
- // ensure initial scroll
205
- el.scrollTop = el.scrollHeight;
206
- }
207
- // run after DOM ready
208
- if (document.readyState === "complete" || document.readyState === "interactive") {
209
- setupObserver();
210
- } else {
211
- document.addEventListener("DOMContentLoaded", setupObserver);
212
- }
213
- })();
214
- </script>
215
- """
216
 
217
- # ==========================
218
- # BUILD GRADIO APP
219
- # ==========================
220
- with gr.Blocks(css=CSS, title="⚡ YellowFlash.ai") as demo:
221
- # topbar: left dropdown small, right title
222
- with gr.Row(elem_id="topbar"):
223
- model_dd = gr.Dropdown(list(MODELS.keys()), value=list(MODELS.keys())[0], elem_id="modeldd", show_label=False)
224
- gr.HTML("<div id='title'>�� YellowFlash.ai</div>")
225
 
226
- # chat display uses Gradio Chatbot (expects list of tuples)
227
- chat_display = gr.Chatbot(elem_classes="chatbox", label="", show_label=False)
 
228
 
229
- # input row: 90/10 (textbox + send icon)
230
- with gr.Row(elem_id="inputrow"):
231
- txt = gr.Textbox(placeholder="Type a message...", elem_id="msgbox", show_label=False, lines=2)
232
- send = gr.Button("➤", elem_id="sendbtn")
233
 
234
- # wire events: outputs are [chat_display, txt] (history, cleared textbox)
235
- send.click(chat_fn_send, inputs=[txt, chat_display, model_dd], outputs=[chat_display, txt])
236
- txt.submit(chat_fn_send, inputs=[txt, chat_display, model_dd], outputs=[chat_display, txt])
237
 
238
- # Inject JS for auto-scroll
239
- gr.HTML(JS)
 
240
 
241
- demo.queue()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
  demo.launch(share=True)
 
1
# yellowflash_chatinterface_with_dropdown.py
# Keep ChatInterface (so input is reliably clickable), add model dropdown + styling.
#
# SECURITY NOTE(review): the Gemini and Groq API keys previously hardcoded in
# this file were committed to a public repo and must be considered compromised.
# Rotate them, then supply replacements via environment variables; never
# hardcode secrets in source.

import os
import time

import gradio as gr
import requests

# -------------------------
# API configuration (keys come from the environment)
# -------------------------
# Empty-string fallback keeps module import side-effect free; requests made
# without a key will fail with an auth error surfaced to the chat as "Error: ...".
GEMINI_KEY = os.environ.get("GEMINI_API_KEY", "")
GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"

GROQ_KEY = os.environ.get("GROQ_API_KEY", "")
GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
19
# -------------------------
# helper with retries
# -------------------------
def post_with_retries(url, headers, payload, timeout=15, max_retries=2):
    """POST ``payload`` as JSON to ``url``, retrying transient failures.

    Args:
        url: Endpoint URL.
        headers: HTTP headers dict.
        payload: JSON-serializable request body.
        timeout: Per-request timeout in seconds.
        max_retries: Total number of attempts before giving up.

    Returns:
        The successful ``requests.Response``.

    Raises:
        Exception: the last error encountered once all attempts are exhausted
            (HTTP 4xx/5xx are raised via ``raise_for_status`` so they retry too).
    """
    last_err = None
    for attempt in range(max_retries):
        try:
            r = requests.post(url, headers=headers, json=payload, timeout=timeout)
            r.raise_for_status()  # treat HTTP error statuses as failures
            return r
        except Exception as e:
            last_err = e
            if attempt == max_retries - 1:
                raise
            # simple linear backoff between attempts
            time.sleep(0.6 + attempt)
    # Defensive: only reachable if max_retries < 1; keeps the function total.
    raise RuntimeError("Max retries exceeded") from last_err
33
# -------------------------
# main chat function used by ChatInterface
# signature: (message, history, extra_input)
# -------------------------
def chat_fn(message, history, model_choice):
    """Answer ``message`` with the selected model, forwarding prior turns.

    Fixes a regression where ``history`` was ignored, so models only ever saw
    the latest message and could not hold a conversation.

    Args:
        message: The user's new message.
        history: Prior turns from ``gr.ChatInterface`` — either a list of
            ``(user, assistant)`` pairs or a list of ``{"role", "content"}``
            dicts, depending on the Gradio version (both are handled).
        model_choice: Display name selected in the model dropdown.

    Returns:
        The assistant reply as a plain string (ChatInterface contract), or an
        ``"Error: ..."`` string when the request fails.
    """
    # Normalize history into (user, assistant) pairs regardless of format.
    pairs = []
    for turn in history or []:
        if isinstance(turn, dict):
            # messages-style history: fold user/assistant alternation into pairs
            if turn.get("role") == "user":
                pairs.append([turn.get("content", ""), ""])
            elif turn.get("role") == "assistant" and pairs:
                pairs[-1][1] = turn.get("content", "")
        else:
            u, a = turn
            pairs.append([u or "", a or ""])

    try:
        if model_choice == "Google Gemini 2.0 Flash":
            headers = {"Content-Type": "application/json", "x-goog-api-key": GEMINI_KEY}
            # Gemini uses role "model" (not "assistant") for replies.
            contents = []
            for u, a in pairs:
                contents.append({"role": "user", "parts": [{"text": u}]})
                if a:
                    contents.append({"role": "model", "parts": [{"text": a}]})
            contents.append({"role": "user", "parts": [{"text": message}]})
            resp = post_with_retries(GEMINI_URL, headers, {"contents": contents})
            data = resp.json()
            # safe grab: any missing key falls through to the "" default
            ans = data.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0].get("text", "")
            return ans or "Gemini returned no content."

        else:  # Meta LLaMA 4 via Groq (OpenAI-compatible)
            headers = {"Authorization": f"Bearer {GROQ_KEY}", "Content-Type": "application/json"}
            messages = []
            for u, a in pairs:
                messages.append({"role": "user", "content": u})
                if a:
                    messages.append({"role": "assistant", "content": a})
            messages.append({"role": "user", "content": message})
            payload = {"model": GROQ_MODEL, "messages": messages}
            resp = post_with_retries(GROQ_URL, headers, payload)
            data = resp.json()
            # handle both OpenAI-style ("message") and legacy ("text") shapes
            if data.get("choices"):
                ch = data["choices"][0]
                if isinstance(ch.get("message"), dict):
                    return ch["message"].get("content", "")
                return ch.get("text", "")
            return str(data)

    except Exception as e:
        # ChatInterface expects a string; report errors inline rather than crash.
        return f"Error: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -------------------------
# small CSS to style ChatInterface
# -------------------------
css = """
/* general */
body { background: #0d0d0d; color: #fff; font-family: Inter, Arial, sans-serif; }

/* container width */
.gradio-container { max-width: 1200px; margin: 0 auto; }

/* Title area */
h1, h2 { color: #ffcc33 !important; }

/* Chat bubbles (visual only) */
.message.user { background: #2cc3ff !important; color: #000 !important; border-radius: 10px !important; padding: 8px 12px !important; }
.message.bot { background: #151515 !important; color: #ddd !important; border: 1px solid #2b2b2b !important; border-radius: 10px !important; padding: 8px 12px !important; }

/* Input area */
textarea.svelte-textarea { background: #0e0e0e !important; color: #fff !important; border-radius: 10px !important; border: 1px solid #262626 !important; }
.gr-button { background: #2cc3ff !important; color: #000 !important; border-radius: 10px !important; font-weight: 700 !important; }

/* Make sure Gradio's interface doesn't overlay the input */
.gradio-container .chatbox { padding-bottom: 120px !important; }

/* Dropdown small & flat */
.model-dd .gr-dropdown { background: transparent !important; border: 1px solid #2b2b2b !important; border-radius: 8px !important; padding: 8px 12px !important; color: #ddd !important; box-shadow:none !important; }
"""

# -------------------------
# build the UI: ChatInterface + dropdown as additional input
# -------------------------
model_selector = gr.Dropdown(
    choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
    value="Google Gemini 2.0 Flash",
    label="Model",
    elem_classes="model-dd",
)

demo = gr.ChatInterface(
    fn=chat_fn,
    title="⚡ yellowflash.ai",
    description="Select model from dropdown and chat.",
    additional_inputs=[model_selector],
    css=css,
)

# Launch only when executed as a script (Spaces runs app.py as __main__);
# importing this module elsewhere must not start a server.
if __name__ == "__main__":
    demo.launch(share=True)