manvithll committed on
Commit
6d938d8
·
verified ·
1 Parent(s): fec03b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +137 -59
app.py CHANGED
@@ -1,98 +1,171 @@
1
  # yellowflash_with_perplexity.py
2
  # TEST ONLY: hardcoded keys included (do NOT publish)
3
 
4
- import time, traceback, requests
 
 
5
  import gradio as gr
6
 
 
 
 
 
 
7
  # ---------------------------
8
  # HARDCODED KEYS (TESTING)
9
  # ---------------------------
10
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
11
- GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
 
12
 
13
  GROQ_KEY = "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"
14
- GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
 
15
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
16
 
17
- # ---------------------------
18
- # Helpers
19
- # ---------------------------
20
- def post_with_retries(url, headers, payload, timeout=18, max_retries=2):
21
- for i in range(max_retries):
22
- try:
23
- r = requests.post(url, headers=headers, json=payload, timeout=timeout)
24
- r.raise_for_status()
25
- return r
26
- except Exception as e:
27
- if i == max_retries - 1:
28
- raise
29
- time.sleep(0.5 + i)
30
- raise Exception("Max retries exceeded")
31
 
32
  # ---------------------------
33
- # Model callers
34
  # ---------------------------
35
- def call_gemini(api_key, message, history):
36
- headers = {"Content-Type": "application/json", "x-goog-api-key": api_key}
37
  contents = []
38
- for u, m in history:
39
- contents.append({"role":"user","parts":[{"text":u}]})
40
- contents.append({"role":"model","parts":[{"text":m}]})
41
- contents.append({"role":"user","parts":[{"text":message}]})
42
- payload = {"contents": contents}
43
- r = post_with_retries(GEMINI_URL, headers, payload)
44
- data = r.json()
45
- return data.get("candidates",[{}])[0].get("content",{}).get("parts",[{}])[0].get("text","")
46
-
47
- def call_llama_via_groq(api_key, model, message, history):
48
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  msgs = []
50
- for u, m in history:
51
- msgs.append({"role":"user","content":u})
52
- msgs.append({"role":"assistant","content":m})
53
- msgs.append({"role":"user","content":message})
54
- payload = {"model": model, "messages": msgs}
55
- r = post_with_retries(GROQ_URL, headers, payload)
56
- data = r.json()
57
- if "choices" in data and data["choices"]:
58
- ch = data["choices"][0]
59
- if isinstance(ch.get("message"), dict):
60
- return ch["message"].get("content","")
61
- return ch.get("text","")
62
- return str(data)
 
 
 
 
 
 
 
 
 
 
 
63
 
64
  # ---------------------------
65
- # Chat function
66
  # ---------------------------
67
  def chat_fn(message, history, model_choice):
68
  try:
69
  if model_choice == "Google Gemini 2.0 Flash":
70
- return call_gemini(GEMINI_KEY, message, history)
 
71
  elif model_choice == "Meta LLaMA 4":
72
- return call_llama_via_groq(GROQ_KEY, GROQ_MODEL, message, history)
 
73
  else:
74
- return f"Unknown model: {model_choice}"
75
  except Exception as e:
76
- return f"Error: {e}\n{traceback.format_exc()}"
77
 
78
  # ---------------------------
79
- # Dark Mode CSS (your original)
80
  # ---------------------------
81
  css = """
82
- /* topbar layout */
83
  #topbar { display:flex; justify-content:space-between; align-items:center;
84
  padding:18px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
85
  #title { font-weight:800; color:#ffcc33; font-size:20px; }
86
 
87
- /* compact, flat dropdown look */
88
- #model_dropdown .gr-dropdown { background:#1a1a1a !important; border:1px solid #2b2b2b !important;
89
- color:#ddd !important; padding:10px 12px !important; border-radius:8px !important;
90
- width:260px !important; box-shadow:none !important; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  /* make ChatInterface chat area taller */
93
  .gradio-container .chat-interface .chatbot { min-height: calc(100vh - 220px); background:#111; color:#eee; }
94
 
95
- /* style send button */
96
  .gr-button { border-radius:10px !important; background:#2c2c3f !important; color:#fff !important; }
97
  """
98
 
@@ -105,15 +178,20 @@ with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
105
  choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
106
  value="Google Gemini 2.0 Flash",
107
  show_label=False,
108
- elem_id="model_dropdown"
 
109
  )
110
 
111
-
112
  gr.ChatInterface(
113
  fn=chat_fn,
114
  title="⚡ YellowFlash.ai",
115
  description="under development",
116
  additional_inputs=[model_dropdown],
 
117
  )
118
 
119
- app.launch(share=True)
 
 
 
 
1
  # yellowflash_with_perplexity.py
2
  # TEST ONLY: hardcoded keys included (do NOT publish)
3
 
# Standard library
import os
import time
import traceback

# Third-party
import requests
import gradio as gr

# New Imports for Streaming (SDK clients)
from google import genai
from groq import Groq
from groq.types.chat import ChatCompletionChunk
13
+
14
# ---------------------------
# HARDCODED KEYS (TESTING)
# ---------------------------
# SECURITY: the literals below are test-only fallbacks. The environment
# variables GEMINI_KEY / GROQ_KEY take precedence, so a real deployment never
# has to ship these values. Any key that has ever been committed (as these
# have) should be treated as leaked and revoked.
GEMINI_KEY = os.environ.get("GEMINI_KEY", "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o")
# NOTE: Gemini Python SDK handles the URL internally for streaming
# GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"

GROQ_KEY = os.environ.get("GROQ_KEY", "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")
# NOTE: Groq Python SDK handles the URL internally
GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"


def _init_client(factory, label):
    """Build one API client; return None (with a warning) instead of crashing.

    Keeps the app importable even when a key is invalid or the SDK is missing.
    """
    try:
        return factory()
    except Exception as exc:
        print(f"WARNING: Failed to initialize {label} client: {exc}")
        return None


# Initialize each client independently so one bad key does not disable both
# backends (previously a single try-block set BOTH clients to None on any
# single failure).
GEMINI_CLIENT = _init_client(lambda: genai.Client(api_key=GEMINI_KEY), "Gemini")
GROQ_CLIENT = _init_client(lambda: Groq(api_key=GROQ_KEY), "Groq")
 
 
 
 
36
 
37
  # ---------------------------
38
+ # Model callers (UPDATED FOR STREAMING)
39
  # ---------------------------
40
def call_gemini_stream(message, history):
    """Stream a Gemini reply, yielding the accumulated text after each chunk.

    `history` is a sequence of (user, model) text pairs, as produced by
    gr.ChatInterface in tuple mode — TODO confirm against the Gradio version
    in use.
    """
    if GEMINI_CLIENT is None:
        yield "Error: Gemini client not initialized. Check API key."
        return

    # Convert the tuple-pair history into the SDK's role/parts content format.
    contents = []
    for past_user, past_model in history:
        contents.extend(
            (
                {"role": "user", "parts": [{"text": past_user}]},
                {"role": "model", "parts": [{"text": past_model}]},
            )
        )
    contents.append({"role": "user", "parts": [{"text": message}]})

    stream = GEMINI_CLIENT.models.generate_content_stream(
        model="gemini-2.0-flash",  # model name passed directly, no URL needed
        contents=contents,
    )

    accumulated = ""
    for chunk in stream:
        # Skip chunks that carry no text content.
        if chunk.text:
            accumulated += chunk.text
            yield accumulated
64
+
65
def call_llama_via_groq_stream(message, history):
    """Stream a LLaMA reply via Groq, yielding the accumulated text per delta.

    `history` is a sequence of (user, assistant) text pairs, as produced by
    gr.ChatInterface in tuple mode — TODO confirm against the Gradio version
    in use.
    """
    if GROQ_CLIENT is None:
        yield "Error: Groq client not initialized. Check API key."
        return

    # Flatten the tuple-pair history into OpenAI-style role/content messages.
    msgs = [
        {"role": role, "content": text}
        for past_user, past_model in history
        for role, text in (("user", past_user), ("assistant", past_model))
    ]
    msgs.append({"role": "user", "content": message})

    stream = GROQ_CLIENT.chat.completions.create(
        model=GROQ_MODEL,
        messages=msgs,
        stream=True,  # server streams deltas instead of one blocking response
    )

    so_far = ""
    for chunk in stream:
        if not (isinstance(chunk, ChatCompletionChunk) and chunk.choices):
            continue
        delta_text = chunk.choices[0].delta.content
        if delta_text:
            so_far += delta_text
            yield so_far
92
 
93
  # ---------------------------
94
+ # Chat function (UPDATED to be a GENERATOR)
95
  # ---------------------------
96
def chat_fn(message, history, model_choice):
    """Generator entry point for gr.ChatInterface.

    Routes the request to the backend selected in the dropdown and re-yields
    its streamed output. Any failure is rendered in-chat together with the
    traceback — acceptable for a test-only app, not for production.
    """
    try:
        if model_choice == "Meta LLaMA 4":
            stream = call_llama_via_groq_stream(message, history)
        elif model_choice == "Google Gemini 2.0 Flash":
            stream = call_gemini_stream(message, history)
        else:
            yield f"Unknown model: {model_choice}"
            return
        yield from stream
    except Exception as e:
        yield f"Error: {e}\n{traceback.format_exc()}"
108
 
109
  # ---------------------------
110
+ # CSS (UPDATED for smoother dropdown look)
111
  # ---------------------------
112
  css = """
113
+ /* Topbar layout */
114
  #topbar { display:flex; justify-content:space-between; align-items:center;
115
  padding:18px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
116
  #title { font-weight:800; color:#ffcc33; font-size:20px; }
117
 
118
+ /* ------------------------------------- */
119
+ /* ** DROPDOWN STYLING FOR SMOOTH LOOK ** */
120
+ /* ------------------------------------- */
121
+ #model_dropdown .gr-dropdown-wrap {
122
+ /* Hide the default arrow container for a cleaner look */
123
+ border: none !important;
124
+ box-shadow: none !important;
125
+ }
126
+
127
+ #model_dropdown .gr-dropdown-wrap > button {
128
+ /* Main button styling (the collapsed box) */
129
+ background:#1a1a1a !important;
130
+ border:1px solid #2b2b2b !important;
131
+ color:#ddd !important;
132
+ padding:10px 15px !important;
133
+ border-radius:10px !important;
134
+ box-shadow:none !important;
135
+ /* Smooth transition for hover effect */
136
+ transition: all 0.2s ease-in-out;
137
+ }
138
+ #model_dropdown .gr-dropdown-wrap > button:hover {
139
+ border-color: #ffcc33 !important;
140
+ background: #252525 !important;
141
+ }
142
+
143
+ /* Style for the options list when opened */
144
+ .gradio-container .gr-dropdown-options {
145
+ background: #1a1a1a !important;
146
+ border: 1px solid #ffcc33 !important; /* Accent border when open */
147
+ border-radius: 10px !important;
148
+ box-shadow: 0 4px 10px rgba(0, 0, 0, 0.5);
149
+ padding: 5px 0;
150
+ }
151
+ .gradio-container .gr-dropdown-options button {
152
+ /* Style for individual options */
153
+ color: #ddd !important;
154
+ padding: 10px 15px !important;
155
+ background: transparent !important;
156
+ transition: background 0.1s ease;
157
+ }
158
+ .gradio-container .gr-dropdown-options button:hover {
159
+ /* Highlight the selected option with the yellow accent */
160
+ background: #ffcc3320 !important; /* Light yellow background on hover */
161
+ color: #fff !important;
162
+ }
163
+ /* ------------------------------------- */
164
 
165
  /* make ChatInterface chat area taller */
166
  .gradio-container .chat-interface .chatbot { min-height: calc(100vh - 220px); background:#111; color:#eee; }
167
 
168
+ /* style send button (your original) */
169
  .gr-button { border-radius:10px !important; background:#2c2c3f !important; color:#fff !important; }
170
  """
171
 
 
178
  choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
179
  value="Google Gemini 2.0 Flash",
180
  show_label=False,
181
+ elem_id="model_dropdown",
182
+ scale=1 # Use scale to control width in the Row
183
  )
184
 
185
+ # We must explicitly set title and description here as per your original request
186
  gr.ChatInterface(
187
  fn=chat_fn,
188
  title="⚡ YellowFlash.ai",
189
  description="under development",
190
  additional_inputs=[model_dropdown],
191
+ # When streaming, the Submit button automatically becomes a Stop button
192
  )
193
 
194
if __name__ == "__main__":
    # share=True was removed: on Hugging Face Spaces the platform serves the
    # app itself, so a plain launch() (default host/port) is all that is
    # needed there.
    app.launch()