File size: 2,209 Bytes
139b796
ca44f52
e5ed98b
 
139b796
d2d943e
 
ca44f52
e5ed98b
 
139b796
3e1c21c
 
 
d2d943e
 
 
3e1c21c
d2d943e
 
 
3e1c21c
d2d943e
 
3e1c21c
 
 
 
 
 
d2d943e
3e1c21c
 
d2d943e
3e1c21c
 
 
d2d943e
3e1c21c
d2d943e
 
 
 
 
 
 
3e1c21c
 
 
 
 
d2d943e
 
3e1c21c
 
d2d943e
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import streamlit as st
import os
from google import genai
from google.genai import types

# --- 1. CONFIG ---
st.set_page_config(page_title="Abyssinia Intelligence 3.0", page_icon="🏥")

# Read the Gemini API key from the environment. Fail fast with a clear
# message if it is missing — otherwise genai.Client(api_key=None) only
# surfaces a confusing auth error on the first chat turn.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    st.error("🚨 **Missing API key.** Set the `GOOGLE_API_KEY` environment variable and restart the app.")
    st.stop()

client = genai.Client(api_key=api_key)

# Persist the chat transcript across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# --- 2. UI ---
st.title("🏥 Abyssinia Intelligence 3.0")
st.caption("Now powered by Gemini 3.0 Flash with Agentic Reasoning")

# Replay the stored transcript so the conversation survives Streamlit's
# top-to-bottom rerun on every interaction.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)

# --- 3. LOGIC ---
if prompt := st.chat_input("How can I help you today?"):
    # Record and echo the user's turn immediately.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        try:
            # Map stored history into SDK Content objects. The Gemini API
            # only accepts the roles "user" and "model", so Streamlit's
            # "assistant" role must be translated — sending "assistant"
            # makes multi-turn requests fail.
            history_bundle = [
                types.Content(
                    role="model" if m["role"] == "assistant" else m["role"],
                    parts=[types.Part(text=m["content"])],
                )
                for m in st.session_state.messages[:-1]  # exclude the turn we are about to send
            ]

            chat = client.chats.create(
                model="gemini-3-flash-preview", # <-- UPGRADED MODEL
                config=types.GenerateContentConfig(
                    system_instruction="You are Abyssinia Intelligence. Use advanced medical reasoning.",
                    # Setting thinking to 'low' helps avoid the 429 quota errors
                    # by reducing the computational load for simple chat turns.
                    thinking_config=types.ThinkingConfig(
                        thinking_level=types.ThinkingLevel.LOW
                    ),
                    temperature=0.3
                ),
                history=history_bundle
            )

            # Send the new prompt, render the reply, and persist it so the
            # next rerun (and the next history_bundle) includes it.
            response = chat.send_message(prompt)
            st.markdown(response.text)
            st.session_state.messages.append({"role": "assistant", "content": response.text})

        except Exception as e:
            # Best-effort 429 detection: the SDK surfaces the HTTP status
            # inside the exception message.
            if "429" in str(e):
                st.error("🚨 **Quota Exceeded.** Gemini 3.0 Flash is in high demand. Please wait 10 seconds.")
            else:
                st.error(f"Error: {e}")