manvithll committed on
Commit
d89fb75
·
verified ·
1 Parent(s): d5b463d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -71
app.py CHANGED
@@ -1,92 +1,111 @@
1
- # yellowflash_chat_with_image.py
2
- # Native ChatInterface + Gemini image gen in-chat
3
 
4
- import time, requests, io, base64
 
5
  import gradio as gr
6
  from PIL import Image
7
 
8
  # ---------------------------
9
- # HARDCODED KEYS (TESTING)
10
  # ---------------------------
11
- GEMINI_KEY = "[REDACTED — leaked API key removed; revoke this credential]"
12
- GEMINI_TEXT_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
13
-
14
- GROQ_KEY = "[REDACTED — leaked API key removed; revoke this credential]"
15
- GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
16
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
17
 
18
  # ---------------------------
19
- # IMAGE GENERATION CALL (using google-genai style API)
20
  # ---------------------------
21
- def call_gemini_image(prompt_text):
 
 
 
 
 
 
22
  try:
23
- # NOTE: requires google-genai installed and correct endpoint
24
  from google import genai
25
- client = genai.Client(api_key=GEMINI_KEY)
26
- result = client.models.generate_images(
27
- model="models/imagen-4.0-generate-001",
28
- prompt=prompt_text,
29
- config=dict(number_of_images=1, output_mime_type="image/jpeg"),
30
- )
31
- if not result.generated_images:
32
- return "⚠️ No image generated."
33
-
34
- gi = result.generated_images[0]
35
- if hasattr(gi, "image"):
36
- return gi.image # PIL.Image -> Gradio can render inline
37
- if hasattr(gi, "dataURI"):
38
- header, b64 = gi.dataURI.split(",", 1)
39
- img_bytes = io.BytesIO(base64.b64decode(b64))
40
- return Image.open(img_bytes).convert("RGB")
41
- return "⚠️ Unknown image response."
42
- except Exception as e:
43
- return f"Image generation error: {e}"
44
 
45
  # ---------------------------
46
- # TEXT CALLS
47
  # ---------------------------
48
- def call_gemini_text(message, history):
49
- headers = {"Content-Type":"application/json","x-goog-api-key": GEMINI_KEY}
50
- contents = []
51
- for u, m in history:
52
- contents.append({"role":"user","parts":[{"text":u}]})
53
- contents.append({"role":"model","parts":[{"text":m}]})
54
- contents.append({"role":"user","parts":[{"text":message}]})
55
- payload = {"contents": contents}
56
- r = requests.post(GEMINI_TEXT_URL, headers=headers, json=payload, timeout=20)
57
- data = r.json()
58
- return data.get("candidates",[{}])[0].get("content",{}).get("parts",[{}])[0].get("text","")
59
-
60
  def call_llama_text(message, history):
61
- headers = {"Authorization": f"Bearer {GROQ_KEY}", "Content-Type":"application/json"}
62
  msgs = []
63
  for u, m in history:
64
- msgs.append({"role":"user","content":u})
65
- msgs.append({"role":"assistant","content":m})
66
- msgs.append({"role":"user","content":message})
67
  payload = {"model": GROQ_MODEL, "messages": msgs}
68
- r = requests.post(GROQ_URL, headers=headers, json=payload, timeout=20)
69
  data = r.json()
70
  if "choices" in data and data["choices"]:
71
- ch = data["choices"][0]
72
- return ch.get("message",{}).get("content","")
73
  return str(data)
74
 
75
  # ---------------------------
76
- # CHAT FUNCTION
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  # ---------------------------
78
  def chat_fn(message, history, model_choice):
79
  try:
80
- if model_choice == "Google Gemini 2.0 Flash":
81
- return call_gemini_text(message, history)
82
  elif model_choice == "Meta LLaMA 4":
83
  return call_llama_text(message, history)
84
- elif model_choice == "Gemini Imagen 2.0":
85
- # Return image directly in chat
86
- img = call_gemini_image(message)
87
- return img
88
  else:
89
- return "⚠️ Unknown model selected."
90
  except Exception as e:
91
  return f"Error: {e}"
92
 
@@ -94,24 +113,20 @@ def chat_fn(message, history, model_choice):
94
  # CSS
95
  # ---------------------------
96
  css = """
97
- #topbar { display:flex; justify-content:space-between; align-items:center;
98
- padding:16px 22px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; }
99
- #title { font-weight:800; color:#ffcc33; font-size:20px; }
100
- #model_dropdown .gr-dropdown { background:transparent !important;
101
- border:1px solid #2b2b2b !important; color:#ddd !important;
102
- padding:8px 12px !important; border-radius:8px !important; width:260px !important; }
103
- .gradio-container .chat-interface .chatbot { min-height: calc(100vh - 220px); }
104
- .gr-button { border-radius:10px !important; }
105
  """
106
 
107
  # ---------------------------
108
- # BUILD UI
109
  # ---------------------------
110
  with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
111
  with gr.Row(elem_id="topbar"):
112
  model_dropdown = gr.Dropdown(
113
- choices=["Google Gemini 2.0 Flash", "Meta LLaMA 4", "Gemini Imagen 2.0"],
114
- value="Google Gemini 2.0 Flash",
115
  show_label=False,
116
  elem_id="model_dropdown"
117
  )
@@ -120,7 +135,7 @@ with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
120
  gr.ChatInterface(
121
  fn=chat_fn,
122
  title="⚡",
123
- description="FAST AS LIGHTNING — text + image in one UI",
124
  additional_inputs=[model_dropdown],
125
  )
126
 
 
1
+ # yellowflash_with_gemini25_images.py
2
+ # Hardcoded API keys for TESTING ONLY (do not publish).
3
 
4
+ import time, io, base64, mimetypes, traceback
5
+ import requests
6
  import gradio as gr
7
  from PIL import Image
8
 
9
  # ---------------------------
10
+ # HARDCODED KEYS
11
  # ---------------------------
12
+ GEMINI_KEY = "YOUR_GEMINI_KEY"
13
+ GROQ_KEY = "YOUR_GROQ_KEY"
14
+ GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
 
 
15
  GROQ_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
16
 
17
  # ---------------------------
18
+ # Lazy import of google-genai
19
  # ---------------------------
20
# Lazily-created google-genai client state.  GENAI_AVAILABLE flips to True
# only after a client has been constructed successfully.
GENAI_AVAILABLE = False
GENAI_CLIENT = None

def ensure_genai_client():
    """Create the google-genai client on first use.

    Returns:
        True when a client is ready (either already cached or freshly
        constructed), False when the ``google-genai`` package is missing
        or the client could not be built.
    """
    global GENAI_AVAILABLE, GENAI_CLIENT
    if GENAI_AVAILABLE:
        return True
    try:
        from google import genai
        GENAI_CLIENT = genai.Client(api_key=GEMINI_KEY)
    except Exception:
        # Import or client construction failed; caller reports the error.
        return False
    GENAI_AVAILABLE = True
    return True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  # ---------------------------
36
+ # Text-only calls
37
  # ---------------------------
 
 
 
 
 
 
 
 
 
 
 
 
38
def call_llama_text(message, history):
    """Send the conversation plus the new message to Groq's
    OpenAI-compatible chat-completions endpoint and return the reply text.

    Args:
        message: The new user message.
        history: List of (user, assistant) string pairs, as produced by
            gr.ChatInterface in tuple format.

    Returns:
        The assistant reply text, or the raw response payload as a string
        when the response has an unexpected shape (e.g. an API error
        object), so the problem is visible in the chat UI.
    """
    headers = {
        "Authorization": f"Bearer {GROQ_KEY}",
        "Content-Type": "application/json",
    }
    msgs = []
    for user_turn, assistant_turn in history:
        msgs.append({"role": "user", "content": user_turn})
        msgs.append({"role": "assistant", "content": assistant_turn})
    msgs.append({"role": "user", "content": message})
    payload = {"model": GROQ_MODEL, "messages": msgs}
    r = requests.post(GROQ_URL, headers=headers, json=payload, timeout=25)
    data = r.json()
    # Defensive access: the original indexed data["choices"][0]["message"]
    # ["content"] directly, which raises KeyError on a malformed choice.
    choices = data.get("choices") if isinstance(data, dict) else None
    if choices:
        return choices[0].get("message", {}).get("content", "")
    return str(data)
51
 
52
  # ---------------------------
53
+ # Gemini 2.5 Image/Text call
54
+ # ---------------------------
55
def call_gemini25_image_text(prompt):
    """Generate an image (and optional caption) from *prompt* using the
    Gemini 2.5 Flash image-preview model via a streaming call.

    Args:
        prompt: The user's text prompt.

    Returns:
        A list of one ``(caption, PIL.Image)`` tuple when an image was
        produced (the shape gr.ChatInterface renders inline), otherwise a
        plain string: the accumulated text, a fallback caption, or an
        error message.
    """
    if not ensure_genai_client():
        return "ERROR: google-genai not installed. Run `pip install google-genai`."

    from google.genai import types

    model = "gemini-2.5-flash-image-preview"
    contents = [types.Content(role="user", parts=[types.Part.from_text(text=prompt)])]
    config = types.GenerateContentConfig(response_modalities=["IMAGE", "TEXT"])

    messages = []
    image = None

    try:
        for chunk in GENAI_CLIENT.models.generate_content_stream(
            model=model, contents=contents, config=config
        ):
            if not chunk.candidates:
                continue
            parts = chunk.candidates[0].content.parts
            if not parts:
                continue
            # Inspect every part: a chunk may interleave text and image
            # parts, and the original first-part-only check silently
            # dropped anything past index 0.
            for part in parts:
                if getattr(part, "text", None):
                    messages.append(part.text)
                elif getattr(part, "inline_data", None):
                    try:
                        img_bytes = io.BytesIO(part.inline_data.data)
                        # Later images overwrite earlier ones; the last
                        # decoded image wins, matching original behavior.
                        image = Image.open(img_bytes).convert("RGB")
                    except Exception as e:
                        messages.append(f"[Image decoding failed: {e}]")
    except Exception as e:
        return f"Error: {e}\n{traceback.format_exc()}"

    caption = " ".join(messages).strip() or f"Generated image for: {prompt}"
    if image:
        return [(caption, image)]
    return caption
97
+
98
+ # ---------------------------
99
+ # Chat fn for ChatInterface
100
  # ---------------------------
101
def chat_fn(message, history, model_choice):
    """Route a single chat turn to the backend picked in the dropdown.

    Any backend exception is rendered as an ``Error: ...`` string so the
    chat UI never crashes on a failed API call.
    """
    try:
        if model_choice == "Meta LLaMA 4":
            return call_llama_text(message, history)
        if model_choice == "Gemini Imagen 2.5":
            return call_gemini25_image_text(message)
        return "Unknown model."
    except Exception as e:
        return f"Error: {e}"
111
 
 
113
  # CSS
114
  # ---------------------------
115
  css = """
116
+ #topbar { display:flex; justify-content:space-between; align-items:center; padding:14px 20px; background:#0f0f0f; border-bottom:1px solid #1f1f1f; position:fixed; top:0; left:0; right:0; z-index:999; }
117
+ #title { font-weight:800; color:#ffcc33; font-size:18px; }
118
+ #model_dropdown .gr-dropdown { background:transparent !important; border:1px solid #2b2b2b !important; color:#ddd !important; padding:8px 10px !important; border-radius:8px !important; width:260px !important; }
119
+ .gradio-container { padding-top: 72px !important; }
 
 
 
 
120
  """
121
 
122
  # ---------------------------
123
+ # Build UI
124
  # ---------------------------
125
  with gr.Blocks(css=css, title="⚡ YellowFlash.ai") as app:
126
  with gr.Row(elem_id="topbar"):
127
  model_dropdown = gr.Dropdown(
128
+ choices=["Meta LLaMA 4", "Gemini Imagen 2.5"],
129
+ value="Meta LLaMA 4",
130
  show_label=False,
131
  elem_id="model_dropdown"
132
  )
 
135
  gr.ChatInterface(
136
  fn=chat_fn,
137
  title="⚡",
138
+ description="Text + Image with Gemini 2.5",
139
  additional_inputs=[model_dropdown],
140
  )
141