Spaces:
Sleeping
Sleeping
File size: 4,085 Bytes
359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb 359f16f 336f8fb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import streamlit as st
import os
from google import genai
from google.genai import types
# --- Configuration & Styling ---
st.set_page_config(page_title="Gemini 3: Hypothesis Engine", layout="wide")
st.title("🔬 Advanced Scientific Hypothesis Engine")
st.caption("Powered by Gemini 3 Pro Reasoning & Action Loops")

# SECURE API KEY: Add this in Hugging Face "Settings > Secrets"
API_KEY = os.environ.get("GOOGLE_API_KEY")
if not API_KEY:
    # Fail fast with a visible message instead of crashing later on an
    # unauthenticated client. st.stop() halts this script run entirely.
    st.error("Please add your GOOGLE_API_KEY to the Hugging Face Space Secrets.")
    st.stop()

# --- Initialize Gen AI Client ---
client = genai.Client(api_key=API_KEY)
# --- Define Advanced System Instructions ---
# System prompt sent once per chat session (see client.chats.create below).
# NOTE: this is a runtime string consumed by the model — edit with care.
SYSTEM_INSTRUCTIONS: str = """
You are a Senior Scientific Discovery Agent specializing in cross-disciplinary synthesis.
Your core objective: Find contradictions, missing links, or novel hypotheses in massive research datasets.
STRATEGIC PROTOCOL:
1. ANALYSIS: Scan the 1M token context for conflicting claims between papers.
2. PLANNING: Explicitly state your reasoning path before taking any action.
3. VERIFICATION: Use the 'code_execution' tool to run Python simulations or statistical checks.
4. GROUNDING: Use 'google_search' to verify if your discovery is already public.
5. PERSISTENCE: If a tool fails, analyze the error and try a different Python approach.
DO NOT provide medical diagnoses. Focus on chemistry, physics, and materials science.
"""
# --- Stateful Session Management ---
# BUG FIX: the guard checks for a *missing or None* chat, not just a missing
# key — the sidebar "Reset Lab State" button may leave the key present with a
# None value, and the original `"chat" not in st.session_state` check would
# then skip re-initialization and crash on the next send_message().
if st.session_state.get("chat") is None:
    # Official SDK handles Thought Signatures automatically in Chat sessions
    st.session_state.chat = client.chats.create(
        model="gemini-3-pro-preview",
        config=types.GenerateContentConfig(
            system_instruction=SYSTEM_INSTRUCTIONS,
            thinking_config=types.ThinkingConfig(
                include_thoughts=True,
                thinking_level=types.ThinkingLevel.HIGH,  # Mandatory for Marathon Agents
            ),
            tools=[
                # FIX: GoogleSearchRetrieval is the Gemini 1.5 grounding tool;
                # Gemini 2.x/3 models take the GoogleSearch tool instead.
                types.Tool(google_search=types.GoogleSearch()),
                types.Tool(code_execution=types.ToolCodeExecution()),
            ],
            temperature=1.0,  # Gemini 3 reasoning is optimized for 1.0
        ),
    )
    st.session_state.messages = []
# --- UI Sidebar: Multi-Paper Ingestion ---
with st.sidebar:
    st.header("Research Corpus")
    # NOTE(review): uploaded_files is collected but never sent to the model in
    # this script — presumably ingestion happens elsewhere; confirm.
    uploaded_files = st.file_uploader(
        "Upload PDFs (Max 1M Tokens)", type="pdf", accept_multiple_files=True
    )
    if st.button("Reset Lab State"):
        # BUG FIX: assigning None left the "chat" key present, so the
        # `"chat" not in st.session_state` guard never re-created the chat.
        # Removing the key guarantees re-initialization on the next rerun.
        st.session_state.pop("chat", None)
        st.session_state.messages = []
        st.rerun()
# --- Main Interaction Loop ---
# Replay the stored conversation so it survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

if prompt := st.chat_input("Enter your research objective..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # We use st.status to show the "Thought Signatures" and Action Loops live
        with st.status("Agent Reasoning...", expanded=True) as status:
            response = st.session_state.chat.send_message(prompt)
            candidate = response.candidates[0]

            # 1. Display Internal Reasoning (Thought Summary)
            # getattr-guard: not every candidate carries a thought summary.
            thought_summary = getattr(candidate, "thought_summary", None)
            if thought_summary:
                st.info(f"**Thought Signature Path:**\n{thought_summary}")

            # 2. Display Action Loop: Code Execution & Search
            for part in candidate.content.parts:
                if part.executable_code:
                    # BUG FIX: st.code() has no `label` keyword — passing one
                    # raises TypeError. Show the caption separately instead.
                    st.caption("Agent-Generated Script")
                    st.code(part.executable_code.code, language="python")
                if part.code_execution_result:
                    st.success(f"Execution Output: {part.code_execution_result.output}")

            status.update(label="Discovery Finalized", state="complete")

        st.markdown(response.text)

    st.session_state.messages.append({"role": "assistant", "content": response.text})