File size: 4,096 Bytes
6b09ebf
 
 
a53f0e9
6b09ebf
 
 
 
 
a53f0e9
6b09ebf
 
 
a53f0e9
6b09ebf
 
 
a53f0e9
6b09ebf
 
 
7fdacd5
6b09ebf
 
a53f0e9
6b09ebf
 
a53f0e9
6b09ebf
 
 
a53f0e9
6b09ebf
 
 
 
 
 
 
 
 
 
 
 
 
 
a53f0e9
6b09ebf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a53f0e9
6b09ebf
 
 
 
 
 
 
 
 
 
 
 
 
 
a53f0e9
6b09ebf
 
 
 
 
 
 
 
 
 
a53f0e9
6b09ebf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a53f0e9
6b09ebf
a53f0e9
 
 
6b09ebf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
# app.py
import os
from typing import Dict, TypedDict, Annotated, Sequence
import gradio as gr
from langgraph.graph import END, StateGraph
from langchain_core.messages import AIMessage
import google.generativeai as genai
from langsmith import Client
from langchain_core.tracers.context import tracing_v2_enabled

# Configuration - use environment variables for secrets
# Both keys are optional at import time; features degrade if absent.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")

# Setting up APIs
# Configure the Gemini SDK only when a key is present so the module can
# still be imported (e.g. for tooling) without credentials.
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)

# Enable LangSmith tracing only when its key is present; these env vars
# are read by langchain's tracing machinery, not by this module directly.
if LANGSMITH_API_KEY:
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_API_KEY"] = LANGSMITH_API_KEY
    os.environ["LANGCHAIN_PROJECT"] = "multi_agent"
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
    client = Client()

# I use my free tier model
# NOTE(review): constructed unconditionally — calls will fail at request time
# if GEMINI_API_KEY was not set above.
gemini_model = genai.GenerativeModel('gemini-1.5-flash')

class AgentState(TypedDict):
    """Shared state passed between nodes of the LangGraph workflow."""
    # The Annotated reducer (list concatenation) makes LangGraph APPEND each
    # node's returned messages to the running list instead of replacing it.
    messages: Annotated[Sequence[AIMessage], lambda x, y: x + y]
    # The user's original question, set once when the graph is invoked.
    user_query: str

def research_agent(state: AgentState) -> Dict:
    """Research node: ask Gemini for findings, sources, and applications.

    Reads ``state['user_query']`` and returns ``{"messages": [...]}`` with a
    single AIMessage whose content starts with "RESEARCH REPORT:" on success,
    or "Research failed: ..." if the model call raises.
    """
    query = state['user_query']
    prompt = f"""You are a research assistant. Conduct research on: {query}
    Provide:
    - 3 key findings
    - 2 reliable sources
    - Potential applications
    Keep response concise (1-2 paragraphs)."""

    # Keep .text access inside the try: the SDK can raise on blocked content.
    try:
        result = gemini_model.generate_content(prompt)
        report = AIMessage(content=f"RESEARCH REPORT:\n{result.text}")
    except Exception as e:
        report = AIMessage(content=f"Research failed: {str(e)}")
    return {"messages": [report]}

def writing_agent(state: AgentState) -> Dict:
    """Writing node: turn the research report into a short article draft.

    Returns ``{"messages": [...]}`` with one AIMessage prefixed
    "ARTICLE DRAFT:" on success, or "Writing failed: ..." on error.
    """
    messages = state['messages']
    # Default of None guards against StopIteration: if the research step
    # failed, its message says "Research failed: ..." and carries no
    # "RESEARCH REPORT" marker.
    research_report = next(
        (m.content for m in messages if "RESEARCH REPORT" in m.content),
        None,
    )
    if research_report is None:
        return {"messages": [AIMessage(content="Writing failed: no research report available")]}

    prompt = f"""Write a short article (3 paragraphs max) about:
    Topic: {state['user_query']}
    Research: {research_report}
    Include:
    - Introduction
    - Key points
    - Conclusion"""

    try:
        response = gemini_model.generate_content(prompt)
        return {"messages": [AIMessage(content=f"ARTICLE DRAFT:\n{response.text}")]}
    except Exception as e:
        return {"messages": [AIMessage(content=f"Writing failed: {str(e)}")]}

def editing_agent(state: AgentState) -> Dict:
    """Editing node: polish the article draft into the final output.

    Returns ``{"messages": [...]}`` with one AIMessage prefixed
    "FINAL OUTPUT:" on success, or "Editing failed: ..." on error.
    """
    # Default of None guards against StopIteration when the writing step
    # failed and no message contains the "ARTICLE DRAFT" marker.
    draft = next(
        (m.content for m in state['messages'] if "ARTICLE DRAFT" in m.content),
        None,
    )
    if draft is None:
        return {"messages": [AIMessage(content="Editing failed: no article draft available")]}

    prompt = f"""Improve this draft (keep under 300 words):
    {draft}
    Make it:
    - More engaging
    - Better structured
    - Easier to read"""

    try:
        response = gemini_model.generate_content(prompt)
        return {"messages": [AIMessage(content=f"FINAL OUTPUT:\n{response.text}")]}
    except Exception as e:
        return {"messages": [AIMessage(content=f"Editing failed: {str(e)}")]}

def create_workflow():
    """Build and compile the linear research -> write -> edit pipeline."""
    graph = StateGraph(AgentState)

    # Register one node per agent function.
    for node_name, agent_fn in (
        ("researcher", research_agent),
        ("writer", writing_agent),
        ("editor", editing_agent),
    ):
        graph.add_node(node_name, agent_fn)

    # Wire the nodes into a straight-line pipeline ending at END.
    graph.set_entry_point("researcher")
    graph.add_edge("researcher", "writer")
    graph.add_edge("writer", "editor")
    graph.add_edge("editor", END)
    return graph.compile()

# Compiled graph used by the Gradio handler below.
app = create_workflow()

def process_input(user_input):
    """Gradio handler: run the full pipeline and return the final article text.

    On any failure, returns a human-readable error string instead of raising,
    so the UI always receives something to display.
    """
    try:
        result = app.invoke({"messages": [], "user_query": user_input})
        # Default of None guards against StopIteration when an upstream
        # failure means no message carries the "FINAL OUTPUT" marker.
        final_output = next(
            (m.content for m in result['messages'] if "FINAL OUTPUT" in m.content),
            None,
        )
        if final_output is None:
            # Surface the last pipeline message (typically the failure notice)
            # rather than crashing with an opaque error.
            if result['messages']:
                return result['messages'][-1].content
            return "Error: pipeline produced no output"
        # maxsplit=1 keeps the whole article intact even if the generated
        # text itself happens to contain the "FINAL OUTPUT:" marker again.
        return final_output.split("FINAL OUTPUT:", 1)[1].strip()
    except Exception as e:
        return f"Error: {str(e)}"

# Gradio UI: one textbox in, one textbox out, wired to process_input.
demo = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Your question", placeholder="Ask me anything..."),
    outputs=gr.Textbox(label="Generated Article", lines=10),
    title="🧠 AI Research Assistant",
    description="A multi-agent system that researches topics and writes articles using Gemini 1.5 Flash",
    # Example prompts shown below the input box; clicking one fills the form.
    examples=[
        ["Explain quantum computing simply"],
        ["What are the health benefits of meditation?"],
        ["How do solar panels work?"]
    ],
    allow_flagging="never"
)

# Launch the web UI only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()