manvithll committed on
Commit
c96d7fa
·
verified ·
1 Parent(s): 7cb3b6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -181
app.py CHANGED
@@ -1,15 +1,8 @@
1
- # yellowflash_topbar_models.py
2
- # - Model dropdown top-left
3
- # - Title top-right
4
- # - Large always-visible chatbox (full output area)
5
- # - Bottom input row fixed with 90/10 split and ➤ send icon
6
- # - Gemini + LLaMA-4 support, hardcoded keys for testing (DO NOT PUBLISH)
7
-
8
- import time, requests, gradio as gr
9
-
10
- # ==========================
11
- # HARDCODED TEST KEYS (TESTING ONLY)
12
- # ==========================
13
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
14
  GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
15
 
@@ -17,9 +10,10 @@ GROQ_KEY = "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"
17
  GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
18
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
19
 
20
- # ==========================
21
- # Helpers
22
- # ==========================
 
23
  def post_with_retries(url, headers, payload, timeout=20, max_retries=3):
24
  for i in range(max_retries):
25
  try:
@@ -27,29 +21,31 @@ def post_with_retries(url, headers, payload, timeout=20, max_retries=3):
27
  r.raise_for_status()
28
  return r
29
  except Exception:
30
- if i == max_retries-1:
31
  raise
32
  time.sleep(0.7 + i)
33
 
 
34
  def call_gemini(api_key, user_message, history):
35
- headers = {"Content-Type":"application/json", "x-goog-api-key": api_key}
36
  contents = []
37
  for u_disp, a_disp in (history or []):
38
- contents.append({"role":"user","parts":[{"text":u_disp}]})
39
- contents.append({"role":"model","parts":[{"text":a_disp}]})
40
- contents.append({"role":"user","parts":[{"text":user_message}]})
41
  payload = {"contents": contents}
42
  r = post_with_retries(GEMINI_URL, headers, payload)
43
  data = r.json()
44
  return data.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0].get("text", "")
45
 
46
- def call_llama_via_groq(api_key, model, user_message, history):
 
47
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
48
  messages = []
49
  for u_disp, a_disp in (history or []):
50
- messages.append({"role":"user", "content": u_disp})
51
- messages.append({"role":"assistant", "content": a_disp})
52
- messages.append({"role":"user", "content": user_message})
53
  payload = {"model": model, "messages": messages}
54
  r = post_with_retries(GROQ_URL, headers, payload)
55
  data = r.json()
@@ -60,179 +56,58 @@ def call_llama_via_groq(api_key, model, user_message, history):
60
  return ch.get("text", "")
61
  return str(data)
62
 
63
- # ==========================
64
- # Chat logic (manual Chatbot)
65
- # Keep displayed history tuples (You: ... , YellowFlash: ...)
66
- # ==========================
67
- def send_message(user_text, chat_history, model_choice):
68
- """
69
- Inputs:
70
- - user_text: raw string from textbox
71
- - chat_history: list of tuples (display_user, display_ai)
72
- - model_choice: string (dropdown)
73
- Returns:
74
- - updated chat_history (list of tuples)
75
- - clears textbox ("")
76
- """
77
- if not user_text or not user_text.strip():
78
- return chat_history or [], ""
79
  try:
80
  if model_choice == "Google Gemini 2.0 Flash":
81
- reply = call_gemini(GEMINI_KEY, user_text, chat_history or [])
82
  else:
83
- reply = call_llama_via_groq(GROQ_KEY, GROQ_MODEL, user_text, chat_history or [])
84
  except Exception as e:
85
  reply = f"Error: {e}"
 
86
 
87
- # Brand messages for display. Keep tuple format required by gr.Chatbot
88
- user_display = f"You: {user_text}"
89
- ai_display = f"YellowFlash: {reply}"
90
- new_hist = (chat_history or []) + [(user_display, ai_display)]
91
- return new_hist, ""
92
-
93
- # ==========================
94
- # CSS + JS
95
- # - top-left dropdown, top-right title
96
- # - chatbox full area (always visible)
97
- # - input row fixed bottom, 90/10 split
98
- # - z-index and padding to ensure textbox is clickable
99
- # - MutationObserver auto-scroll
100
- # ==========================
101
- CSS = r"""
102
- /* Base */
103
- body { margin:0; background:#0d0d0d; color:#fff; font-family:Inter, Arial, sans-serif; }
104
 
105
- /* Topbar row: dropdown left, title right */
106
- #topbar { display:flex; justify-content:space-between; align-items:center; padding:18px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
 
 
 
 
107
  #title { font-weight:800; color:#ffcc33; font-size:20px; }
108
 
109
- /* Make dropdown look compact and flat (left) */
110
- #modeldd .gr-dropdown { background:transparent !important; border:1px solid #2b2b2b !important; color:#ddd !important; padding:10px 12px !important; border-radius:8px !important; width:220px !important; box-shadow:none !important; }
111
-
112
- /* Chatbox container: large and centered */
113
- .chat-wrapper { padding:20px; }
114
- .chatbox {
115
- width:96%;
116
- max-width:1400px;
117
- height: calc(100vh - 180px); /* leave space for topbar + input row */
118
- margin: 16px auto;
119
- background:#151515;
120
- border-radius:10px;
121
- border:1px solid #262626;
122
- padding:20px;
123
- box-sizing:border-box;
124
- overflow:auto;
125
- position:relative;
126
- z-index:1;
127
- }
128
-
129
- /* Ensure chatbox always visible even when empty */
130
- .chatbox:empty::after {
131
- content: "";
132
- display:block;
133
- height:0;
134
- }
135
-
136
- /* Input row fixed bottom: centered, 90/10 split */
137
- #inputrow {
138
- position:fixed;
139
- left:0; right:0;
140
- bottom:28px;
141
- width:96%;
142
- max-width:1400px;
143
- margin:0 auto;
144
- display:flex;
145
- gap:12px;
146
- align-items:center;
147
- z-index:2147483647;
148
- pointer-events:auto;
149
- }
150
-
151
- /* Textbox (90%) */
152
- #msgbox {
153
- flex:9;
154
- min-height:56px;
155
- border-radius:12px;
156
- background:#0e0e0e;
157
- color:#ddd;
158
- padding:14px;
159
- border:1px solid #262626;
160
- resize:none;
161
- font-size:15px;
162
- pointer-events:auto;
163
- }
164
-
165
- /* Send button (10%) */
166
- #sendbtn {
167
- flex:1;
168
- height:56px;
169
- border-radius:12px;
170
- background:#2cc3ff;
171
- color:#000;
172
- border:none;
173
- display:flex;
174
- align-items:center;
175
- justify-content:center;
176
- font-size:20px;
177
- cursor:pointer;
178
- pointer-events:auto;
179
- }
180
-
181
- /* Gradio message bubble neutralization */
182
- .gradio-chatbot .message { background:transparent !important; border:none !important; padding:0 !important; color:inherit !important; pointer-events:auto; }
183
-
184
- /* Small screens */
185
- @media (max-width:900px) {
186
- #modeldd .gr-dropdown { width:45vw !important; }
187
- .chatbox { width:96%; height: calc(100vh - 220px); }
188
- #inputrow { width:96%; bottom:18px; }
189
- }
190
- """
191
 
192
- JS = """
193
- <script>
194
- (function(){
195
- // auto-scroll the chatbox whenever its content changes
196
- function setup() {
197
- const cb = document.querySelector('.chatbox');
198
- if (!cb) return setTimeout(setup, 200);
199
- const obs = new MutationObserver(() => { cb.scrollTop = cb.scrollHeight; });
200
- obs.observe(cb, {childList:true, subtree:true, characterData:true});
201
- cb.scrollTop = cb.scrollHeight;
202
- }
203
- if (document.readyState === "complete" || document.readyState === "interactive") setup();
204
- else document.addEventListener("DOMContentLoaded", setup);
205
- })();
206
- </script>
207
  """
208
 
209
- # ==========================
210
- # Build Gradio UI (Blocks)
211
- # ==========================
 
212
  with gr.Blocks(css=CSS, title="⚡ YellowFlash.ai") as demo:
213
- # Top bar: model dropdown left, title right (no extra inputs section)
214
  with gr.Row(elem_id="topbar"):
215
- model_dd = gr.Dropdown(choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
216
- value="Google Gemini 2.0 Flash",
217
- show_label=False,
218
- elem_id="modeldd")
 
219
  gr.HTML("<div id='title'>⚡ YellowFlash.ai</div>")
220
 
221
- # Chat display area (always visible, even empty)
222
- with gr.Column(elem_classes="chat-wrapper"):
223
- chat_display = gr.Chatbot(elem_classes="chatbox", label="", show_label=False)
224
-
225
- # Input row fixed at bottom (90/10 split)
226
- with gr.Row(elem_id="inputrow"):
227
- txt = gr.Textbox(placeholder="Type a message...", elem_id="msgbox", show_label=False, lines=2)
228
- send = gr.Button("➤", elem_id="sendbtn")
229
-
230
- # Wire events: outputs [chat_display, txt] -> update chat and clear textbox
231
- send.click(send_message, inputs=[txt, chat_display, model_dd], outputs=[chat_display, txt])
232
- txt.submit(send_message, inputs=[txt, chat_display, model_dd], outputs=[chat_display, txt])
233
-
234
- # inject JS for auto-scroll
235
- gr.HTML(JS)
236
 
237
  demo.queue()
238
  demo.launch(share=True)
 
1
+ import gradio as gr, requests, time
2
+
3
+ # =================
4
+ # HARDCODED KEYS (TESTING)
5
+ # =================
 
 
 
 
 
 
 
6
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
7
  GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
8
 
 
10
  GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
11
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
12
 
13
+
14
+ # =================
15
+ # API Call Helpers
16
+ # =================
17
  def post_with_retries(url, headers, payload, timeout=20, max_retries=3):
18
  for i in range(max_retries):
19
  try:
 
21
  r.raise_for_status()
22
  return r
23
  except Exception:
24
+ if i == max_retries - 1:
25
  raise
26
  time.sleep(0.7 + i)
27
 
28
+
29
  def call_gemini(api_key, user_message, history):
30
+ headers = {"Content-Type": "application/json", "x-goog-api-key": api_key}
31
  contents = []
32
  for u_disp, a_disp in (history or []):
33
+ contents.append({"role": "user", "parts": [{"text": u_disp}]})
34
+ contents.append({"role": "model", "parts": [{"text": a_disp}]})
35
+ contents.append({"role": "user", "parts": [{"text": user_message}]})
36
  payload = {"contents": contents}
37
  r = post_with_retries(GEMINI_URL, headers, payload)
38
  data = r.json()
39
  return data.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0].get("text", "")
40
 
41
+
42
+ def call_llama(api_key, model, user_message, history):
43
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
44
  messages = []
45
  for u_disp, a_disp in (history or []):
46
+ messages.append({"role": "user", "content": u_disp})
47
+ messages.append({"role": "assistant", "content": a_disp})
48
+ messages.append({"role": "user", "content": user_message})
49
  payload = {"model": model, "messages": messages}
50
  r = post_with_retries(GROQ_URL, headers, payload)
51
  data = r.json()
 
56
  return ch.get("text", "")
57
  return str(data)
58
 
59
+
60
+ # =================
61
+ # Chat Function
62
+ # =================
63
+ def chat_fn(message, history, model_choice):
 
 
 
 
 
 
 
 
 
 
 
64
  try:
65
  if model_choice == "Google Gemini 2.0 Flash":
66
+ reply = call_gemini(GEMINI_KEY, message, history)
67
  else:
68
+ reply = call_llama(GROQ_KEY, GROQ_MODEL, message, history)
69
  except Exception as e:
70
  reply = f"Error: {e}"
71
+ return reply
72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
+ # =================
75
+ # CSS
76
+ # =================
77
+ CSS = """
78
+ body { margin:0; background:#0d0d0d; color:#fff; font-family:Inter, Arial, sans-serif; }
79
+ #topbar { display:flex; justify-content:space-between; align-items:center; padding:16px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
80
  #title { font-weight:800; color:#ffcc33; font-size:20px; }
81
 
82
+ /* Chatbox fullscreen */
83
+ .chatbot { height: calc(100vh - 160px) !important; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
+ /* Send button arrow */
86
+ button.lg\\:secondary { background:#2cc3ff !important; border-radius:8px !important; color:#000 !important; font-size:18px !important; }
87
+ button.lg\\:secondary::after { content:"➤"; font-size:18px; }
 
 
 
 
 
 
 
 
 
 
 
 
88
  """
89
 
90
+
91
+ # =================
92
+ # Build UI
93
+ # =================
94
  with gr.Blocks(css=CSS, title="⚡ YellowFlash.ai") as demo:
95
+ # Topbar
96
  with gr.Row(elem_id="topbar"):
97
+ model_dd = gr.Dropdown(
98
+ choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
99
+ value="Google Gemini 2.0 Flash",
100
+ show_label=False,
101
+ )
102
  gr.HTML("<div id='title'>⚡ YellowFlash.ai</div>")
103
 
104
+ # Chat Interface (native, not manual textbox)
105
+ gr.ChatInterface(
106
+ fn=lambda msg, hist: chat_fn(msg, hist, model_dd.value),
107
+ chatbot=gr.Chatbot(elem_classes="chatbot"),
108
+ textbox=gr.Textbox(placeholder="Type a message...", container=False),
109
+ additional_inputs=[model_dd],
110
+ )
 
 
 
 
 
 
 
 
111
 
112
  demo.queue()
113
  demo.launch(share=True)