# hypothesis-engine / streamlit_app.py
# (Hugging Face Space; renamed from app.py — commit e729e63)
import streamlit as st
import os
from google import genai
from google.genai import types
# --- Configuration & Styling ---
st.set_page_config(page_title="Gemini 3: Hypothesis Engine", layout="wide")
st.title("🔬 Advanced Scientific Hypothesis Engine")
st.caption("Powered by Gemini 3 Pro Reasoning & Action Loops")

# SECURE API KEY: Add this in Hugging Face "Settings > Secrets"
API_KEY = os.environ.get("GOOGLE_API_KEY")
if not API_KEY:
    # Fail fast: nothing below works without credentials, so surface the
    # misconfiguration and halt the script run here.
    st.error("Please add your GOOGLE_API_KEY to the Hugging Face Space Secrets.")
    st.stop()

# --- Initialize Gen AI Client ---
client = genai.Client(api_key=API_KEY)
# --- Define Advanced System Instructions ---
# System prompt for the agent; passed once as `system_instruction` when the
# chat session is created below, so it applies to every turn of the session.
SYSTEM_INSTRUCTIONS = """
You are a Senior Scientific Discovery Agent specializing in cross-disciplinary synthesis.
Your core objective: Find contradictions, missing links, or novel hypotheses in massive research datasets.
STRATEGIC PROTOCOL:
1. ANALYSIS: Scan the 1M token context for conflicting claims between papers.
2. PLANNING: Explicitly state your reasoning path before taking any action.
3. VERIFICATION: Use the 'code_execution' tool to run Python simulations or statistical checks.
4. GROUNDING: Use 'google_search' to verify if your discovery is already public.
5. PERSISTENCE: If a tool fails, analyze the error and try a different Python approach.
DO NOT provide medical diagnoses. Focus on chemistry, physics, and materials science.
"""
# --- Stateful Session Management ---
# Create the chat session once per browser session. Use .get(...) is None
# rather than a bare membership test so a session that was reset (which may
# leave the key holding None) is also re-initialized.
if st.session_state.get("chat") is None:
    # Official SDK handles Thought Signatures automatically in Chat sessions
    st.session_state.chat = client.chats.create(
        model="gemini-3-pro-preview",
        config=types.GenerateContentConfig(
            system_instruction=SYSTEM_INSTRUCTIONS,
            thinking_config=types.ThinkingConfig(
                include_thoughts=True,
                thinking_level=types.ThinkingLevel.HIGH,  # Mandatory for Marathon Agents
            ),
            tools=[
                # Tool.google_search expects types.GoogleSearch.
                # GoogleSearchRetrieval belongs to the separate (legacy)
                # google_search_retrieval field and fails validation here.
                types.Tool(google_search=types.GoogleSearch()),
                types.Tool(code_execution=types.ToolCodeExecution()),
            ],
            temperature=1.0,  # Gemini 3 reasoning is optimized for 1.0
        ),
    )
    st.session_state.messages = []
# --- UI Sidebar: Multi-Paper Ingestion ---
with st.sidebar:
    st.header("Research Corpus")
    # NOTE(review): uploaded files are collected but never sent to the model
    # anywhere in this file — confirm ingestion happens elsewhere or is TODO.
    uploaded_files = st.file_uploader("Upload PDFs (Max 1M Tokens)", type="pdf", accept_multiple_files=True)
    if st.button("Reset Lab State"):
        # Remove the key entirely: merely assigning None would leave "chat"
        # present in session_state, so a `"chat" not in st.session_state`
        # guard would never re-create the session after a reset.
        st.session_state.pop("chat", None)
        st.session_state.messages = []
        st.rerun()
# --- Main Interaction Loop ---
# Replay the persisted transcript so the conversation survives Streamlit's
# top-to-bottom rerun on every interaction.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
if prompt := st.chat_input("Enter your research objective..."):
    # Persist and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # We use st.status to show the "Thought Signatures" and Action Loops live
        with st.status("Agent Reasoning...", expanded=True) as status:
            response = st.session_state.chat.send_message(prompt)
            parts = response.candidates[0].content.parts or []

            # 1. Display internal reasoning. In the google-genai SDK there is
            # no `candidate.thought_summary` attribute; thought summaries come
            # back as ordinary content parts flagged with `part.thought`.
            for part in parts:
                if part.thought and part.text:
                    st.info(f"**Thought Signature Path:**\n{part.text}")

            # 2. Display Action Loop: Code Execution & Search
            for part in parts:
                if part.executable_code:
                    # st.code accepts no `label` keyword; caption it instead.
                    st.caption("Agent-Generated Script")
                    st.code(part.executable_code.code, language="python")
                if part.code_execution_result:
                    st.success(f"Execution Output: {part.code_execution_result.output}")

            status.update(label="Discovery Finalized", state="complete")

        # Final answer, rendered and persisted for transcript replay.
        st.markdown(response.text)
        st.session_state.messages.append({"role": "assistant", "content": response.text})