Abyssinia-Intelligence / src/streamlit_app.py
dawit45's picture
Update src/streamlit_app.py
d2d943e verified
import streamlit as st
import os
from google import genai
from google.genai import types
# --- 1. CONFIG ---
# Page icon repaired: the original literal was mojibake ("πŸ₯") of the
# hospital emoji, matching the medical theme of the app.
st.set_page_config(page_title="Abyssinia Intelligence 3.0", page_icon="🏥")

# Read the Gemini API key from the environment. Fail fast with an explicit
# message instead of letting genai.Client raise a cryptic error on the first
# request when the key is missing.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    st.error("🚨 **Missing API key.** Set the `GOOGLE_API_KEY` environment variable.")
    st.stop()

client = genai.Client(api_key=api_key)

# Chat transcript persisted across Streamlit reruns: a list of
# {"role": "user" | "assistant", "content": str} dicts.
if "messages" not in st.session_state:
    st.session_state.messages = []
# --- 2. UI ---
# Title emoji repaired: the original literal was mojibake ("πŸ₯") of the
# hospital emoji, matching the medical theme of the app.
st.title("🏥 Abyssinia Intelligence 3.0")
st.caption("Now powered by Gemini 3.0 Flash with Agentic Reasoning")

# Replay the stored transcript so the conversation survives Streamlit's
# top-to-bottom rerun on every interaction.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# --- 3. LOGIC ---
if prompt := st.chat_input("How can I help you today?"):
    # Record and echo the user's turn immediately.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        try:
            # Map the stored transcript (every turn except the prompt just
            # appended) into SDK Content objects. BUGFIX: the Gemini API only
            # accepts the roles "user" and "model" — stored "assistant" turns
            # must be remapped, otherwise any request after the first reply
            # is rejected with an invalid-role error.
            history_bundle = [
                types.Content(
                    role="model" if m["role"] == "assistant" else m["role"],
                    parts=[types.Part(text=m["content"])],
                )
                for m in st.session_state.messages[:-1]
            ]
            chat = client.chats.create(
                model="gemini-3-flash-preview",  # <-- UPGRADED MODEL
                config=types.GenerateContentConfig(
                    system_instruction="You are Abyssinia Intelligence. Use advanced medical reasoning.",
                    # Setting thinking to 'low' helps avoid the 429 quota errors
                    # by reducing the computational load for simple chat turns.
                    thinking_config=types.ThinkingConfig(
                        thinking_level=types.ThinkingLevel.LOW
                    ),
                    temperature=0.3,
                ),
                history=history_bundle,
            )
            response = chat.send_message(prompt)
            # response.text can be None (e.g. blocked/empty candidates);
            # fall back to "" so st.markdown and the stored history stay
            # valid strings for the next turn's Part(text=...) mapping.
            answer = response.text or ""
            st.markdown(answer)
            st.session_state.messages.append({"role": "assistant", "content": answer})
        except Exception as e:
            # Rate-limit errors surface as generic exceptions containing the
            # HTTP status; detect 429 by substring for a friendlier message.
            if "429" in str(e):
                st.error("🚨 **Quota Exceeded.** Gemini 3.0 Flash is in high demand. Please wait 10 seconds.")
            else:
                st.error(f"Error: {e}")