# yellowflash_with_perplexity.py
# TEST ONLY: hardcoded keys included (do NOT publish)
import os
import time
import traceback

import gradio as gr
import requests
# ---------------------------
# HARDCODED KEYS (TESTING)
# ---------------------------
# SECURITY NOTE(review): these keys were committed in plaintext for testing.
# Environment variables now take precedence so deployments can inject real
# credentials; the literal fallbacks keep the original test behavior but
# MUST be rotated and removed before publishing.
GEMINI_KEY = os.environ.get("GEMINI_API_KEY", "AIzaSyAPfDiu2V_aD6un00qHt5bkISm6C0Pkx7o")
GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
GROQ_KEY = os.environ.get("GROQ_API_KEY", "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")
GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
# ---------------------------
# Helpers
# ---------------------------
def post_with_retries(url, headers, payload, timeout=18, max_retries=2):
    """POST ``payload`` as JSON to ``url``, retrying transient HTTP failures.

    Only ``requests.RequestException`` (connection errors, timeouts, and
    non-2xx statuses raised by ``raise_for_status``) triggers a retry;
    programming errors such as an unserializable payload propagate at once.
    Sleeps 0.5s, 1.5s, ... between attempts (simple linear backoff).

    Returns the successful ``requests.Response``.
    Raises the last network error after ``max_retries`` failed attempts.
    """
    for attempt in range(max_retries):
        try:
            resp = requests.post(url, headers=headers, json=payload, timeout=timeout)
            resp.raise_for_status()
            return resp
        except requests.RequestException:
            # Final attempt: surface the real error instead of masking it.
            if attempt == max_retries - 1:
                raise
            time.sleep(0.5 + attempt)
    # Reached only if max_retries < 1 — defensive guard, not a normal path.
    raise Exception("Max retries exceeded")
# ---------------------------
# Model callers
# ---------------------------
def call_gemini(api_key, message, history):
    """Send the full chat transcript to Gemini 2.0 Flash and return its reply.

    ``history`` is a list of (user_text, model_text) pairs; ``message`` is
    appended as the final user turn. Returns the first candidate's text, or
    "" when any expected field is missing from the response.
    """
    # NOTE(review): assumes Gradio tuple-format history; a messages-format
    # (list-of-dicts) history would break the pair unpack — confirm version.
    def _turn(role, text):
        # Gemini REST schema: one content entry per turn, text wrapped in parts.
        return {"role": role, "parts": [{"text": text}]}

    transcript = []
    for user_text, model_text in history:
        transcript.append(_turn("user", user_text))
        transcript.append(_turn("model", model_text))
    transcript.append(_turn("user", message))

    response = post_with_retries(
        GEMINI_URL,
        {"Content-Type": "application/json", "x-goog-api-key": api_key},
        {"contents": transcript},
    )
    body = response.json()
    # Defensive extraction: every level falls back so a malformed reply yields "".
    candidate = body.get("candidates", [{}])[0]
    parts = candidate.get("content", {}).get("parts", [{}])
    return parts[0].get("text", "")
def call_llama_via_groq(api_key, model, message, history):
    """Send the chat transcript to Groq's OpenAI-compatible completions API.

    ``history`` is a list of (user_text, assistant_text) pairs converted to
    OpenAI-style messages, with ``message`` appended as the last user turn.
    Returns the assistant text, or the raw response rendered with ``str``
    when no usable choice is present.
    """
    auth_headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    conversation = []
    for user_text, assistant_text in history:
        conversation.append({"role": "user", "content": user_text})
        conversation.append({"role": "assistant", "content": assistant_text})
    conversation.append({"role": "user", "content": message})

    reply = post_with_retries(
        GROQ_URL, auth_headers, {"model": model, "messages": conversation}
    )
    body = reply.json()
    if not ("choices" in body and body["choices"]):
        # No choices at all — expose the raw payload for debugging.
        return str(body)
    first = body["choices"][0]
    msg = first.get("message")
    if isinstance(msg, dict):
        return msg.get("content", "")
    # Fallback mirrors completion-style replies that put text at choice level.
    return first.get("text", "")
# ---------------------------
# Chat function
# ---------------------------
def chat_fn(message, history, model_choice):
    """Route one chat turn to the backend named by the dropdown selection.

    Returns the model's reply text; unknown selections yield an
    "Unknown model: ..." string, and any backend failure is rendered as an
    "Error: ..." string with the traceback (test-only behavior).
    """
    # Lambdas defer name lookup and argument evaluation until dispatch time.
    backends = {
        "Google Gemini 2.0 Flash": lambda: call_gemini(GEMINI_KEY, message, history),
        "Meta LLaMA 4": lambda: call_llama_via_groq(GROQ_KEY, GROQ_MODEL, message, history),
    }
    handler = backends.get(model_choice)
    if handler is None:
        return f"Unknown model: {model_choice}"
    try:
        return handler()
    except Exception as err:
        return f"Error: {err}\n{traceback.format_exc()}"
# ---------------------------
# Dark Mode CSS (your original)
# ---------------------------
# Dark-theme CSS injected verbatim into gr.Blocks(css=...) below.
# NOTE(review): selectors target Gradio-generated ids/classes (#topbar,
# #model_dropdown, .gr-button, .chat-interface); they may break if the
# installed Gradio version changes its DOM — verify after upgrades.
css = """
/* topbar layout */
#topbar { display:flex; justify-content:space-between; align-items:center;
padding:18px 28px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
#title { font-weight:800; color:#ffcc33; font-size:20px; }
/* compact, flat dropdown look */
#model_dropdown .gr-dropdown { background:#1a1a1a !important; border:1px solid #2b2b2b !important;
color:#ddd !important; padding:10px 12px !important; border-radius:8px !important;
width:260px !important; box-shadow:none !important; }
/* make ChatInterface chat area taller */
.gradio-container .chat-interface .chatbot { min-height: calc(100vh - 220px); background:#111; color:#eee; }
/* style send button */
.gr-button { border-radius:10px !important; background:#2c2c3f !important; color:#fff !important; }
"""
# ---------------------------
# Build UI
# ---------------------------
# Assemble the UI: a top bar holding the backend selector, then the chat
# surface wired to chat_fn (the dropdown value arrives as its third argument).
with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as demo:
    with gr.Row(elem_id="topbar"):
        backend_picker = gr.Dropdown(
            choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4"],
            value="Google Gemini 2.0 Flash",
            show_label=False,
            elem_id="model_dropdown",
        )
    gr.ChatInterface(
        fn=chat_fn,
        title="⚡ YellowFlash.ai",
        description="under development",
        additional_inputs=[backend_picker],
    )
# share=True requests a public tunnel URL — test-only convenience.
demo.launch(share=True)