Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
warnings.filterwarnings("ignore", message=".*TqdmWarning.*")
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
|
| 5 |
+
_ = load_dotenv()
|
| 6 |
+
|
| 7 |
+
from langgraph.graph import StateGraph, END
|
| 8 |
+
from typing import TypedDict, Annotated, List
|
| 9 |
+
import operator
|
| 10 |
+
from langchain_core.messages import SystemMessage, HumanMessage
|
| 11 |
+
from langchain_openai import ChatOpenAI
|
| 12 |
+
from pydantic import BaseModel
|
| 13 |
+
from tavily import TavilyClient
|
| 14 |
+
import os
|
| 15 |
+
import gradio as gr
|
| 16 |
+
|
| 17 |
+
# Define agent state class
class AgentState(TypedDict):
    """State dict threaded through every LangGraph node.

    Each node returns a partial update that LangGraph merges into this
    state. `count` is annotated with `operator.add`, so values returned
    by nodes are accumulated rather than overwritten.
    """

    task: str                # essay topic supplied by the user
    lnode: str               # name of the last node that ran
    plan: str                # high-level outline produced by the planner
    research_queries: List[str]  # search queries produced by the research node
    draft: str               # current essay draft
    critique: str            # grader feedback on the draft
    content: List[str]       # research snippets — not populated by the visible nodes
    revision_number: int     # current revision counter
    max_revisions: int       # cap on revision cycles
    count: Annotated[int, operator.add]  # total node executions (merged additively)
|
| 29 |
+
|
| 30 |
+
# Define queries class
class Queries(BaseModel):
    """Structured-output schema holding a list of research queries.

    NOTE(review): not referenced anywhere in the visible code —
    presumably intended for `model.with_structured_output(Queries)`
    in the research step; confirm before removing.
    """

    queries: List[str]
|
| 33 |
+
|
| 34 |
+
# Writer Agent Class
class Ewriter():
    """Essay-writing agent: planner -> research -> generate -> reflect.

    Builds a linear LangGraph over ``AgentState`` and exposes the
    compiled graph as ``self.graph``. Each node invokes the chat model
    with a role-specific system prompt and returns a partial state
    update (LangGraph merges it; ``count`` accumulates via operator.add).
    """

    def __init__(self):
        # temperature=0 for deterministic, reproducible output.
        self.model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
        self.PLAN_PROMPT = "You are an expert writer tasked with writing a high-level outline of a short 3-paragraph essay."
        self.RESEARCH_PROMPT = "Generate three research queries to help in writing an essay on the given topic."
        self.WRITER_PROMPT = "You are an essay assistant tasked with writing an excellent 3-paragraph essay."
        self.REFLECTION_PROMPT = "You are a teacher grading an essay. Provide critique and suggestions."
        # NOTE(review): the Tavily client is created but never used by the
        # visible nodes — research_node only generates queries and `content`
        # is never populated. Confirm whether a search step was intended.
        self.tavily = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])

        # Initialize Graph: planner -> research -> generate -> reflect -> END
        builder = StateGraph(AgentState)
        builder.add_node("planner", self.plan_node)
        builder.add_node("research", self.research_node)
        builder.add_node("generate", self.generation_node)
        builder.add_node("reflect", self.reflection_node)
        builder.set_entry_point("planner")
        builder.add_edge("planner", "research")
        builder.add_edge("research", "generate")
        builder.add_edge("generate", "reflect")
        builder.add_edge("reflect", END)  # Ensure reflect is not a dead-end

        self.graph = builder.compile()

    def plan_node(self, state: AgentState):
        """Produce a high-level outline for the essay topic in ``state['task']``."""
        try:
            response = self.model.invoke([SystemMessage(content=self.PLAN_PROMPT), HumanMessage(content=state['task'])])
            return {"plan": response.content, "lnode": "planner", "count": 1}
        except Exception as e:
            # count=0 so failed runs are not tallied as executed nodes.
            return {"plan": f"Error occurred in planning: {str(e)}", "lnode": "planner", "count": 0}

    def research_node(self, state: AgentState):
        """Generate research queries for the topic, one per output line."""
        try:
            response = self.model.invoke([SystemMessage(content=self.RESEARCH_PROMPT), HumanMessage(content=state['task'])])
            return {"research_queries": response.content.split('\n'), "lnode": "research", "count": 1}
        except Exception as e:
            # Fix: the error path previously returned a bare str for
            # research_queries, violating AgentState's List[str] contract
            # and diverging from the success path. Wrap it in a list.
            return {"research_queries": [f"Error occurred in research: {str(e)}"], "lnode": "research", "count": 0}

    def generation_node(self, state: AgentState):
        """Write a 3-paragraph essay draft for the topic."""
        try:
            response = self.model.invoke([SystemMessage(content=self.WRITER_PROMPT), HumanMessage(content=state['task'])])
            return {"draft": response.content, "lnode": "generate", "count": 1}
        except Exception as e:
            return {"draft": f"Error occurred in generation: {str(e)}", "lnode": "generate", "count": 0}

    def reflection_node(self, state: AgentState):
        """Critique the current draft like a grading teacher."""
        try:
            response = self.model.invoke([SystemMessage(content=self.REFLECTION_PROMPT), HumanMessage(content=state['draft'])])
            return {"critique": response.content, "lnode": "reflect", "count": 1}
        except Exception as e:
            return {"critique": f"Error occurred in reflection: {str(e)}", "lnode": "reflect", "count": 0}
|
| 85 |
+
|
| 86 |
+
# Gradio UI
class WriterGui():
    """Gradio front-end around a compiled essay-writer graph."""

    def __init__(self, graph):
        # graph: compiled LangGraph exposing .invoke(state_dict).
        self.graph = graph
        self.demo = self.create_interface()

    @staticmethod
    def _queries_as_text(value):
        """Render research queries for a Textbox (list -> newline-joined str)."""
        # Defensive: the error path upstream may yield a plain string.
        if isinstance(value, str):
            return value
        return "\n".join(value)

    def run_agent(self, topic, revision_number, max_revisions):
        """Run the full graph from scratch and return display values.

        Returns (draft, last_node, count, critique, research_queries_text).
        """
        config = {'task': topic, 'max_revisions': max_revisions, 'revision_number': revision_number, 'lnode': "", 'count': 0}
        response = self.graph.invoke(config)
        return (response["draft"], response["lnode"], response["count"],
                response.get("critique", ""),
                self._queries_as_text(response.get("research_queries", [])))

    def continue_agent(self, topic, revision_number, max_revisions, last_node, current_draft):
        """Re-invoke the graph, seeding it with the previous node and draft.

        NOTE(review): the compiled graph is linear, so this re-runs from the
        planner; confirm whether resuming mid-graph was intended.
        """
        config = {'task': topic, 'max_revisions': max_revisions, 'revision_number': revision_number, 'lnode': last_node, 'draft': current_draft, 'count': 0}
        response = self.graph.invoke(config)
        return (response["draft"], response["lnode"], response["count"],
                response.get("critique", ""),
                self._queries_as_text(response.get("research_queries", [])))

    def create_interface(self):
        """Build the Blocks UI and wire the buttons to the agent methods."""
        with gr.Blocks() as demo:
            with gr.Tabs():
                with gr.Tab("Agent"):
                    topic_input = gr.Textbox(label="Essay Topic")
                    last_node = gr.Textbox(label="Last Node", interactive=False)
                    next_node = gr.Textbox(label="Next Node", interactive=False)
                    thread = gr.Textbox(label="Thread", interactive=False)
                    draft_rev = gr.Textbox(label="Draft Revision", interactive=False)
                    count = gr.Textbox(label="Count", interactive=False)
                    generate_button = gr.Button("Generate Essay", variant="primary")
                    continue_button = gr.Button("Continue Essay")

                with gr.Row():
                    gr.Markdown("**Manage Agent**")
                with gr.Row():
                    output_text = gr.Textbox(label="Live Agent Output", interactive=False)
                with gr.Row():
                    critique_text = gr.Textbox(label="Critique", interactive=False)
                with gr.Row():
                    research_text = gr.Textbox(label="Research Queries", interactive=False)

            # Fix: the third return value is the execution count — it was
            # previously wired to the "Next Node" textbox; route it to `count`.
            outputs = [output_text, last_node, count, critique_text, research_text]
            generate_button.click(
                fn=self.run_agent,
                inputs=[topic_input, gr.State(0), gr.State(2)],
                outputs=outputs,
            )
            # Fix: `draft_rev` is never written by any callback, so continuing
            # always passed an empty draft. `output_text` holds the latest
            # draft, so feed that back in instead.
            continue_button.click(
                fn=self.continue_agent,
                inputs=[topic_input, gr.State(0), gr.State(2), last_node, output_text],
                outputs=outputs,
            )

        return demo

    def launch(self):
        """Launch the UI with a public share link."""
        self.demo.launch(share=True)
|
| 131 |
+
|
| 132 |
+
# Run the App
if __name__ == "__main__":
    # Guarded so importing this module does not construct API clients
    # (OpenAI/Tavily) or launch a shared Gradio server as a side effect.
    MultiAgent = Ewriter()
    app = WriterGui(MultiAgent.graph)
    app.launch()
|