manvithll committed on
Commit
7420099
·
verified ·
1 Parent(s): 1bbd05b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -169
app.py CHANGED
@@ -1,42 +1,72 @@
1
- # yellowflash_with_perplexity.py
2
- # TEST ONLY: hardcoded keys included (do NOT publish)
3
 
 
4
  import time
5
  import traceback
6
- import gradio as gr
7
 
8
- # New Imports for Streaming (must be in your requirements.txt: google-genai, groq)
9
  from google import genai
10
  from groq import Groq
11
- from groq.types.chat import ChatCompletionChunk
12
-
13
 
14
  # ---------------------------
15
- # HARDCODED KEYS (TESTING)
 
16
  # ---------------------------
17
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
18
  GROQ_KEY = "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"
19
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
20
 
21
- # Initialize Clients
22
  try:
23
  GEMINI_CLIENT = genai.Client(api_key=GEMINI_KEY)
24
  GROQ_CLIENT = Groq(api_key=GROQ_KEY)
25
  except Exception as e:
26
- print(f"WARNING: Failed to initialize one or both API clients: {e}")
27
  GEMINI_CLIENT = None
28
  GROQ_CLIENT = None
29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  # ---------------------------
32
- # Model callers (STREAMING GENERATORS)
33
  # ---------------------------
34
- def call_gemini_stream(message, history):
 
35
  contents = []
36
- for user_msg, model_msg in history:
37
- contents.append({"role": "user", "parts": [{"text": user_msg}]})
38
- contents.append({"role": "model", "parts": [{"text": model_msg}]})
39
- contents.append({"role": "user", "parts": [{"text": message}]})
40
 
41
  if GEMINI_CLIENT is None:
42
  yield "Error: Gemini client not initialized. Check API key."
@@ -47,18 +77,14 @@ def call_gemini_stream(message, history):
47
  contents=contents
48
  )
49
 
50
- full_response = ""
51
  for chunk in response_stream:
52
  if chunk.text:
53
- full_response += chunk.text
54
- yield full_response
55
 
56
- def call_llama_via_groq_stream(message, history):
57
  msgs = []
58
- for user_msg, model_msg in history:
59
- msgs.append({"role": "user", "content": user_msg})
60
- msgs.append({"role": "assistant", "content": model_msg})
61
- msgs.append({"role": "user", "content": message})
62
 
63
  if GROQ_CLIENT is None:
64
  yield "Error: Groq client not initialized. Check API key."
@@ -70,155 +96,45 @@ def call_llama_via_groq_stream(message, history):
70
  stream=True
71
  )
72
 
73
- full_response = ""
74
  for chunk in response_stream:
75
- if isinstance(chunk, ChatCompletionChunk) and chunk.choices:
76
- content = chunk.choices[0].delta.content
77
- if content:
78
- full_response += content
79
- yield full_response
80
-
81
- # ---------------------------
82
- # Chat function (Main generator)
83
- # ---------------------------
84
- def chat_fn(message, history, model_choice):
85
- try:
86
- if model_choice == "Google Gemini 2.0 Flash":
87
- yield from call_gemini_stream(message, history)
88
- elif model_choice == "Meta LLaMA 4":
89
- yield from call_llama_via_groq_stream(message, history)
90
- else:
91
- yield f"Unknown model: {model_choice}"
92
- except Exception as e:
93
- yield f"Error: An API error occurred: {type(e).__name__}. Check server logs."
94
 
95
  # ---------------------------
96
- # CSS (Final Responsive Style)
97
  # ---------------------------
98
- css = """
99
- /* The topbar is a flexible row */
100
- #topbar {
101
- display:flex;
102
- justify-content:flex-start;
103
- align-items:center;
104
- gap: 20px;
105
- padding:18px 28px;
106
- background:#0f0f0f;
107
- border-bottom:1px solid #1f1f1f;
108
- min-height: 80px;
109
- flex-wrap: wrap; /* CRITICAL FOR MOBILE: Allows items to wrap */
110
- }
111
-
112
- /* Main App Title */
113
- #title {
114
- font-weight:800;
115
- color:#ffcc33;
116
- font-size:20px;
117
- margin: 0;
118
- flex-shrink: 0; /* Prevents title from shrinking */
119
- }
120
-
121
- /* ------------------------------------- */
122
- /* ** DROPDOWN STYLING (Wider & Polished) ** */
123
- /* ------------------------------------- */
124
-
125
- /* Set the default width for the dropdown component */
126
- #model_dropdown {
127
- width: 300px;
128
- min-width: 250px;
129
- }
130
-
131
- /* Hide Gradio's default label */
132
- #model_dropdown .gr-dropdown-label {
133
- display: none !important;
134
- }
135
-
136
- /* Style the actual dropdown button */
137
- #model_dropdown .gr-dropdown-wrap > button {
138
- background:#1a1a1a !important;
139
- border:1px solid #333 !important;
140
- color:#ddd !important;
141
- padding:10px 15px !important;
142
- border-radius:10px !important;
143
- box-shadow:none !important;
144
- transition: all 0.2s ease-in-out;
145
- font-size: 16px;
146
- height: auto !important;
147
- width: 100%;
148
- }
149
- #model_dropdown .gr-dropdown-wrap > button:hover {
150
- border-color: #ffcc33 !important;
151
- background: #252525 !important;
152
- }
153
-
154
- /* Style the options list when opened */
155
- .gradio-container .gr-dropdown-options {
156
- background: #1a1a1a !important;
157
- border: 1px solid #ffcc33 !important;
158
- border-radius: 10px !important;
159
- box-shadow: 0 4px 10px rgba(0, 0, 0, 0.5);
160
- padding: 5px 0;
161
- }
162
- .gradio-container .gr-dropdown-options button:hover {
163
- background: #ffcc3320 !important;
164
- color: #fff !important;
165
- }
166
- /* ------------------------------------- */
167
-
168
-
169
- /* Make ChatInterface/Chatbot screen adjusting */
170
- .gradio-container .chat-interface .chatbot {
171
- min-height: calc(100vh - 220px);
172
- background:#111;
173
- color:#eee;
174
- flex-grow: 1;
175
- }
176
-
177
- /* style send button */
178
- .gr-button { border-radius:10px !important; background:#2c2c3f !important; color:#fff !important; }
179
-
180
- /* Mobile View Adjustments: Stack title and dropdown on small screens */
181
- @media (max-width: 600px) {
182
- #topbar {
183
- flex-direction: column;
184
- align-items: flex-start;
185
- gap: 10px;
186
- }
187
- #model_dropdown {
188
- width: 100%;
189
- max-width: none;
190
- }
191
- }
192
- """
193
 
194
- # ---------------------------
195
- # Build UI (Final Layout)
196
- # ---------------------------
197
- with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
198
- with gr.Row(elem_id="topbar"):
199
-
200
- # 1. Title (FIXED: scale argument removed)
201
- # This will take up the remaining space because the dropdown is scaled to 0
202
- gr.Markdown(f'<span id="title">⚡ YellowFlash.ai</span>', elem_id="title_md")
203
-
204
- # 2. Dropdown
205
- model_dropdown = gr.Dropdown(
206
- choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
207
- value="Google Gemini 2.0 Flash",
208
- show_label=False,
209
- elem_id="model_dropdown",
210
- scale=0 # Correct way to prevent it from growing horizontally
211
- )
212
-
213
- # Keep the original "under development" text
214
- gr.Markdown("under development")
215
-
216
- gr.ChatInterface(
217
- fn=chat_fn,
218
- title="",
219
- description="",
220
- additional_inputs=[model_dropdown],
221
- )
222
 
223
- if __name__ == "__main__":
224
- app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # yellowflash_with_streamlit.py
2
+ # Production-ready structure using Streamlit
3
 
4
+ import streamlit as st
5
  import time
6
  import traceback
 
7
 
8
+ # Import API Libraries
9
  from google import genai
10
  from groq import Groq
 
 
11
 
12
  # ---------------------------
13
+ # API Key Setup (Hardcoded for current TEST request)
14
+ # NOTE: In production, load these from st.secrets instead.
15
  # ---------------------------
16
  GEMINI_KEY = "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o"
17
  GROQ_KEY = "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"
18
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
19
 
 
20
  try:
21
  GEMINI_CLIENT = genai.Client(api_key=GEMINI_KEY)
22
  GROQ_CLIENT = Groq(api_key=GROQ_KEY)
23
  except Exception as e:
24
+ st.error(f"Failed to initialize API clients. Check keys. Error: {e}")
25
  GEMINI_CLIENT = None
26
  GROQ_CLIENT = None
27
 
28
# ---------------------------
# Streamlit Session State Initialization
# ---------------------------
# Conversation history lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# ---------------------------
# Streamlit UI Setup
# ---------------------------

# Header row: app title on the left, model picker on the right.
title_col, picker_col = st.columns([3, 1])

with title_col:
    # Branded page title rendered as raw HTML for custom styling.
    st.markdown(
        '<h1 style="color: #ffcc33; font-weight: 800; font-size: 24px; margin: 0;">⚡ YellowFlash.ai</h1>',
        unsafe_allow_html=True,
    )

with picker_col:
    # Backend selector; the label is hidden but kept for accessibility.
    model_choice = st.selectbox(
        label="Model",
        options=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
        index=0,
        label_visibility="hidden",
        key="model_selector",
    )

st.markdown("_under development_")
st.divider()
60
 
61
  # ---------------------------
62
+ # Model Callers (Modified for Streamlit)
63
  # ---------------------------
64
+
65
+ def call_gemini_stream(prompt, history):
66
  contents = []
67
+ # Streamlit history format is dicts: {"role": "user/assistant", "content": "..."}
68
+ for message in history:
69
+ contents.append({"role": message["role"].replace("assistant", "model"), "parts": [{"text": message["content"]}]})
 
70
 
71
  if GEMINI_CLIENT is None:
72
  yield "Error: Gemini client not initialized. Check API key."
 
77
  contents=contents
78
  )
79
 
 
80
  for chunk in response_stream:
81
  if chunk.text:
82
+ yield chunk.text
 
83
 
84
+ def call_llama_via_groq_stream(prompt, history):
85
  msgs = []
86
+ for message in history:
87
+ msgs.append({"role": message["role"], "content": message["content"]})
 
 
88
 
89
  if GROQ_CLIENT is None:
90
  yield "Error: Groq client not initialized. Check API key."
 
96
  stream=True
97
  )
98
 
 
99
  for chunk in response_stream:
100
+ # Groq streaming provides the delta content
101
+ content = chunk.choices[0].delta.content
102
+ if content:
103
+ yield content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
  # ---------------------------
106
+ # Main Chat Logic
107
  # ---------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
 
109
# 1. Replay the stored conversation so it reappears after every rerun.
for past in st.session_state.messages:
    # st.chat_message renders each turn in the appropriate chat bubble.
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
115
# 2. Handle user input
if prompt := st.chat_input("Ask YellowFlash..."):

    # Record and immediately echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Route to the selected backend. Any non-Gemini choice falls through to
    # the Groq/LLaMA caller, matching the two options in the selectbox.
    if model_choice == "Google Gemini 2.0 Flash":
        stream_func = call_gemini_stream
    else:
        stream_func = call_llama_via_groq_stream

    # Stream the assistant reply into the chat area.
    with st.chat_message("assistant"):
        try:
            # st.write_stream consumes the generator, renders the text
            # incrementally, and returns the concatenated result.
            response_generator = stream_func(prompt, st.session_state.messages)
            full_response = st.write_stream(response_generator)

            # Persist the reply. On failure nothing is appended, so the
            # history never contains a half-finished assistant turn.
            st.session_state.messages.append({"role": "assistant", "content": full_response})

        except Exception as e:
            # Show a friendly message to the user, but keep the full stack
            # trace in the server logs for debugging (previously discarded).
            traceback.print_exc()
            st.error(f"An error occurred while streaming from the API: {e}")