manvithll committed on
Commit
9270b2b
·
verified ·
1 Parent(s): 6d938d8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -59
app.py CHANGED
@@ -3,39 +3,36 @@
3
 
4
  import time
5
  import traceback
6
- import requests
7
  import gradio as gr
8
 
9
- # New Imports for Streaming
 
10
  from google import genai
11
  from groq import Groq
12
  from groq.types.chat import ChatCompletionChunk
13
 
 
14
  # ---------------------------
15
  # HARDCODED KEYS (TESTING)
16
  # ---------------------------
17
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
18
- # NOTE: Gemini Python SDK handles the URL internally for streaming
19
- # GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
20
-
21
  GROQ_KEY = "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"
22
- # NOTE: Groq Python SDK handles the URL internally
23
- GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
24
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
25
 
26
  # Initialize Clients
 
 
27
  try:
28
- # Use the hardcoded key directly in the client for testing
29
  GEMINI_CLIENT = genai.Client(api_key=GEMINI_KEY)
30
  GROQ_CLIENT = Groq(api_key=GROQ_KEY)
31
  except Exception as e:
32
- # This prevents the app from crashing if keys are completely invalid/missing
33
  print(f"WARNING: Failed to initialize one or both API clients: {e}")
34
  GEMINI_CLIENT = None
35
  GROQ_CLIENT = None
36
 
 
37
  # ---------------------------
38
- # Model callers (UPDATED FOR STREAMING)
39
  # ---------------------------
40
  def call_gemini_stream(message, history):
41
  # History format conversion for the SDK
@@ -51,13 +48,12 @@ def call_gemini_stream(message, history):
51
 
52
  # Use the streaming method and yield chunks
53
  response_stream = GEMINI_CLIENT.models.generate_content_stream(
54
- model="gemini-2.0-flash", # Using the name directly
55
  contents=contents
56
  )
57
 
58
  full_response = ""
59
  for chunk in response_stream:
60
- # Check if text content exists and is not empty
61
  if chunk.text:
62
  full_response += chunk.text
63
  yield full_response
@@ -78,12 +74,11 @@ def call_llama_via_groq_stream(message, history):
78
  response_stream = GROQ_CLIENT.chat.completions.create(
79
  model=GROQ_MODEL,
80
  messages=msgs,
81
- stream=True # Critical for streaming
82
  )
83
 
84
  full_response = ""
85
  for chunk in response_stream:
86
- # Check if the chunk contains content delta
87
  if isinstance(chunk, ChatCompletionChunk) and chunk.choices:
88
  content = chunk.choices[0].delta.content
89
  if content:
@@ -91,107 +86,134 @@ def call_llama_via_groq_stream(message, history):
91
  yield full_response
92
 
93
  # ---------------------------
94
- # Chat function (UPDATED to be a GENERATOR)
95
  # ---------------------------
96
  def chat_fn(message, history, model_choice):
97
  try:
98
  if model_choice == "Google Gemini 2.0 Flash":
99
- # Delegate to the streaming function
100
  yield from call_gemini_stream(message, history)
101
  elif model_choice == "Meta LLaMA 4":
102
- # Delegate to the streaming function
103
  yield from call_llama_via_groq_stream(message, history)
104
  else:
105
  yield f"Unknown model: {model_choice}"
106
  except Exception as e:
107
- yield f"Error: {e}\n{traceback.format_exc()}"
108
 
109
  # ---------------------------
110
- # CSS (UPDATED for smoother dropdown look)
111
  # ---------------------------
112
  css = """
113
  /* Topbar layout */
114
- #topbar { display:flex; justify-content:space-between; align-items:center;
115
- padding:18px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
116
- #title { font-weight:800; color:#ffcc33; font-size:20px; }
 
 
 
 
 
 
 
 
117
 
118
  /* ------------------------------------- */
119
- /* ** DROPDOWN STYLING FOR SMOOTH LOOK ** */
120
  /* ------------------------------------- */
121
- #model_dropdown .gr-dropdown-wrap {
122
- /* Hide the default arrow container for a cleaner look */
123
- border: none !important;
124
- box-shadow: none !important;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
  }
126
 
 
127
  #model_dropdown .gr-dropdown-wrap > button {
128
- /* Main button styling (the collapsed box) */
129
  background:#1a1a1a !important;
130
- border:1px solid #2b2b2b !important;
131
  color:#ddd !important;
132
- padding:10px 15px !important;
133
- border-radius:10px !important;
134
  box-shadow:none !important;
135
- /* Smooth transition for hover effect */
136
  transition: all 0.2s ease-in-out;
 
 
137
  }
138
  #model_dropdown .gr-dropdown-wrap > button:hover {
139
  border-color: #ffcc33 !important;
140
  background: #252525 !important;
141
  }
142
 
143
- /* Style for the options list when opened */
144
  .gradio-container .gr-dropdown-options {
145
  background: #1a1a1a !important;
146
- border: 1px solid #ffcc33 !important; /* Accent border when open */
147
  border-radius: 10px !important;
148
  box-shadow: 0 4px 10px rgba(0, 0, 0, 0.5);
149
  padding: 5px 0;
150
  }
151
- .gradio-container .gr-dropdown-options button {
152
- /* Style for individual options */
153
- color: #ddd !important;
154
- padding: 10px 15px !important;
155
- background: transparent !important;
156
- transition: background 0.1s ease;
157
- }
158
  .gradio-container .gr-dropdown-options button:hover {
159
- /* Highlight the selected option with the yellow accent */
160
- background: #ffcc3320 !important; /* Light yellow background on hover */
161
  color: #fff !important;
162
  }
 
163
  /* ------------------------------------- */
164
 
 
165
  /* make ChatInterface chat area taller */
166
  .gradio-container .chat-interface .chatbot { min-height: calc(100vh - 220px); background:#111; color:#eee; }
167
 
168
- /* style send button (your original) */
169
  .gr-button { border-radius:10px !important; background:#2c2c3f !important; color:#fff !important; }
170
  """
171
 
172
  # ---------------------------
173
- # Build UI
174
  # ---------------------------
175
  with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
176
  with gr.Row(elem_id="topbar"):
177
- model_dropdown = gr.Dropdown(
178
- choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
179
- value="Google Gemini 2.0 Flash",
180
- show_label=False,
181
- elem_id="model_dropdown",
182
- scale=1 # Use scale to control width in the Row
183
- )
184
-
185
- # We must explicitly set title and description here as per your original request
 
 
 
 
 
 
 
 
 
 
 
 
 
186
  gr.ChatInterface(
187
  fn=chat_fn,
188
- title="⚡ YellowFlash.ai",
189
- description="under development",
190
  additional_inputs=[model_dropdown],
191
- # When streaming, the Submit button automatically becomes a Stop button
192
  )
193
 
194
  if __name__ == "__main__":
195
- # Remove share=True for Hugging Face deployment
196
- # The launch() call is kept simple for the HF environment
197
  app.launch()
 
3
 
4
  import time
5
  import traceback
 
6
  import gradio as gr
7
 
8
+ # New Imports for Streaming (must be in your requirements.txt)
9
+ # pip install google-genai groq
10
  from google import genai
11
  from groq import Groq
12
  from groq.types.chat import ChatCompletionChunk
13
 
14
+
15
  # ---------------------------
16
  # HARDCODED KEYS (TESTING)
17
  # ---------------------------
18
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
 
 
 
19
  GROQ_KEY = "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"
 
 
20
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
21
 
22
  # Initialize Clients
23
+ # NOTE: Initialization inside the file is fine for testing, but in a real app,
24
+ # this should be wrapped in error handling or moved to environment variables.
25
  try:
 
26
  GEMINI_CLIENT = genai.Client(api_key=GEMINI_KEY)
27
  GROQ_CLIENT = Groq(api_key=GROQ_KEY)
28
  except Exception as e:
 
29
  print(f"WARNING: Failed to initialize one or both API clients: {e}")
30
  GEMINI_CLIENT = None
31
  GROQ_CLIENT = None
32
 
33
+
34
  # ---------------------------
35
+ # Model callers (STREAMING GENERATORS)
36
  # ---------------------------
37
  def call_gemini_stream(message, history):
38
  # History format conversion for the SDK
 
48
 
49
  # Use the streaming method and yield chunks
50
  response_stream = GEMINI_CLIENT.models.generate_content_stream(
51
+ model="gemini-2.0-flash",
52
  contents=contents
53
  )
54
 
55
  full_response = ""
56
  for chunk in response_stream:
 
57
  if chunk.text:
58
  full_response += chunk.text
59
  yield full_response
 
74
  response_stream = GROQ_CLIENT.chat.completions.create(
75
  model=GROQ_MODEL,
76
  messages=msgs,
77
+ stream=True
78
  )
79
 
80
  full_response = ""
81
  for chunk in response_stream:
 
82
  if isinstance(chunk, ChatCompletionChunk) and chunk.choices:
83
  content = chunk.choices[0].delta.content
84
  if content:
 
86
  yield full_response
87
 
88
  # ---------------------------
89
+ # Chat function (Main generator)
90
  # ---------------------------
91
  def chat_fn(message, history, model_choice):
92
  try:
93
  if model_choice == "Google Gemini 2.0 Flash":
 
94
  yield from call_gemini_stream(message, history)
95
  elif model_choice == "Meta LLaMA 4":
 
96
  yield from call_llama_via_groq_stream(message, history)
97
  else:
98
  yield f"Unknown model: {model_choice}"
99
  except Exception as e:
100
+ yield f"Error: An API error occurred: {type(e).__name__}. Check server logs."
101
 
102
  # ---------------------------
103
+ # CSS (Combined for Layout and Style)
104
  # ---------------------------
105
  css = """
106
  /* Topbar layout */
107
+ #topbar {
108
+ display:flex; justify-content:space-between; align-items:center;
109
+ padding:18px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f;
110
+ min-height: 80px;
111
+ }
112
+
113
+ /* Main App Title */
114
+ #main-app-title {
115
+ font-weight:800; color:#ffcc33; font-size:20px;
116
+ margin: 0;
117
+ }
118
 
119
  /* ------------------------------------- */
120
+ /* ** COMPACT DROPDOWN AND TITLE AREA ** */
121
  /* ------------------------------------- */
122
+
123
+ /* Style the static title above the dropdown (e.g., "Google") */
124
+ #model-title {
125
+ font-size: 14px;
126
+ font-weight: 500;
127
+ color: #bbb;
128
+ margin-bottom: 2px !important;
129
+ padding: 0 !important;
130
+ line-height: 1.2;
131
+ }
132
+
133
+ /* Force the entire dropdown component to a small width */
134
+ #model_dropdown {
135
+ max-width: 140px;
136
+ min-width: 100px;
137
+ }
138
+
139
+ /* Hide Gradio's default label/placeholder text if any */
140
+ #model_dropdown .gr-dropdown-label {
141
+ display: none !important;
142
  }
143
 
144
+ /* Style the actual dropdown button to match the dark aesthetic */
145
  #model_dropdown .gr-dropdown-wrap > button {
 
146
  background:#1a1a1a !important;
147
+ border:1px solid #333 !important;
148
  color:#ddd !important;
149
+ padding:8px 10px !important; /* Smaller padding */
150
+ border-radius:8px !important;
151
  box-shadow:none !important;
 
152
  transition: all 0.2s ease-in-out;
153
+ font-size: 14px;
154
+ height: auto !important;
155
  }
156
  #model_dropdown .gr-dropdown-wrap > button:hover {
157
  border-color: #ffcc33 !important;
158
  background: #252525 !important;
159
  }
160
 
161
+ /* Style the options list when opened */
162
  .gradio-container .gr-dropdown-options {
163
  background: #1a1a1a !important;
164
+ border: 1px solid #ffcc33 !important;
165
  border-radius: 10px !important;
166
  box-shadow: 0 4px 10px rgba(0, 0, 0, 0.5);
167
  padding: 5px 0;
168
  }
 
 
 
 
 
 
 
169
  .gradio-container .gr-dropdown-options button:hover {
170
+ background: #ffcc3320 !important;
 
171
  color: #fff !important;
172
  }
173
+
174
  /* ------------------------------------- */
175
 
176
+
177
  /* make ChatInterface chat area taller */
178
  .gradio-container .chat-interface .chatbot { min-height: calc(100vh - 220px); background:#111; color:#eee; }
179
 
180
+ /* style send button */
181
  .gr-button { border-radius:10px !important; background:#2c2c3f !important; color:#fff !important; }
182
  """
183
 
184
  # ---------------------------
185
+ # Build UI (Final Layout)
186
  # ---------------------------
187
  with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
188
  with gr.Row(elem_id="topbar"):
189
+
190
+ # 1. Left Column: Model Title and Dropdown (Compact Look)
191
+ with gr.Column(scale=0, min_width=150):
192
+ # Static title for the model vendor/family
193
+ gr.Markdown("**Google**", elem_id="model-title")
194
+
195
+ model_dropdown = gr.Dropdown(
196
+ choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
197
+ value="Google Gemini 2.0 Flash",
198
+ show_label=False,
199
+ elem_id="model_dropdown",
200
+ scale=0
201
+ )
202
+
203
+ # 2. Right Column: The main app title (YellowFlash.ai)
204
+ with gr.Column(scale=1):
205
+ gr.Markdown(f'<span id="main-app-title">⚡ YellowFlash.ai</span>', elem_id="title_md")
206
+
207
+
208
+ # Keep the original "under development" text
209
+ gr.Markdown("under development")
210
+
211
  gr.ChatInterface(
212
  fn=chat_fn,
213
+ title="", # Title is now handled by the custom header row
214
+ description="",
215
  additional_inputs=[model_dropdown],
 
216
  )
217
 
218
  if __name__ == "__main__":
 
 
219
  app.launch()