dlaima committed on
Commit
a9ce647
·
verified ·
1 Parent(s): 1384a28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -49
app.py CHANGED
@@ -1,8 +1,5 @@
1
  import warnings
2
  warnings.filterwarnings("ignore", message=".*TqdmWarning.*")
3
- from dotenv import load_dotenv
4
-
5
- _ = load_dotenv()
6
 
7
  from langgraph.graph import StateGraph, END
8
  from typing import TypedDict, Annotated, List
@@ -11,10 +8,12 @@ from langchain_core.messages import SystemMessage, HumanMessage
11
  from langchain_openai import ChatOpenAI
12
  from pydantic import BaseModel
13
  from tavily import TavilyClient
14
- import os
15
  import gradio as gr
16
 
17
- # Define agent state class
 
 
 
18
  class AgentState(TypedDict):
19
  task: str
20
  lnode: str
@@ -27,21 +26,30 @@ class AgentState(TypedDict):
27
  max_revisions: int
28
  count: Annotated[int, operator.add]
29
 
30
- # Define queries class
31
  class Queries(BaseModel):
32
  queries: List[str]
33
 
34
- # Writer Agent Class
35
- class Ewriter():
36
- def __init__(self):
37
- self.model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
 
 
 
 
 
 
 
 
 
 
38
  self.PLAN_PROMPT = "You are an expert writer tasked with writing a high-level outline of a short 3-paragraph essay."
39
  self.RESEARCH_PROMPT = "Generate three research queries to help in writing an essay on the given topic."
40
  self.WRITER_PROMPT = "You are an essay assistant tasked with writing an excellent 3-paragraph essay."
41
  self.REFLECTION_PROMPT = "You are a teacher grading an essay. Provide critique and suggestions."
42
- self.tavily = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
43
-
44
- # Initialize Graph
45
  builder = StateGraph(AgentState)
46
  builder.add_node("planner", self.plan_node)
47
  builder.add_node("research", self.research_node)
@@ -51,85 +59,108 @@ class Ewriter():
51
  builder.add_edge("planner", "research")
52
  builder.add_edge("research", "generate")
53
  builder.add_edge("generate", "reflect")
54
- builder.add_edge("reflect", END) # Ensure reflect is not a dead-end
55
-
56
  self.graph = builder.compile()
57
 
 
58
  def plan_node(self, state: AgentState):
59
  try:
60
  response = self.model.invoke([SystemMessage(content=self.PLAN_PROMPT), HumanMessage(content=state['task'])])
61
  return {"plan": response.content, "lnode": "planner", "count": 1}
62
  except Exception as e:
63
- return {"plan": f"Error occurred in planning: {str(e)}", "lnode": "planner", "count": 0}
64
 
65
  def research_node(self, state: AgentState):
66
  try:
67
  response = self.model.invoke([SystemMessage(content=self.RESEARCH_PROMPT), HumanMessage(content=state['task'])])
68
  return {"research_queries": response.content.split('\n'), "lnode": "research", "count": 1}
69
  except Exception as e:
70
- return {"research_queries": f"Error occurred in research: {str(e)}", "lnode": "research", "count": 0}
71
 
72
  def generation_node(self, state: AgentState):
73
  try:
74
  response = self.model.invoke([SystemMessage(content=self.WRITER_PROMPT), HumanMessage(content=state['task'])])
75
  return {"draft": response.content, "lnode": "generate", "count": 1}
76
  except Exception as e:
77
- return {"draft": f"Error occurred in generation: {str(e)}", "lnode": "generate", "count": 0}
78
-
79
  def reflection_node(self, state: AgentState):
80
  try:
81
- response = self.model.invoke([SystemMessage(content=self.REFLECTION_PROMPT), HumanMessage(content=state['draft'])])
82
  return {"critique": response.content, "lnode": "reflect", "count": 1}
83
  except Exception as e:
84
- return {"critique": f"Error occurred in reflection: {str(e)}", "lnode": "reflect", "count": 0}
85
 
 
 
86
  # Gradio UI
87
- class WriterGui():
88
- def __init__(self, graph):
89
- self.graph = graph
90
  self.demo = self.create_interface()
91
-
92
- def run_agent(self, topic, revision_number, max_revisions):
93
- config = {'task': topic, 'max_revisions': max_revisions, 'revision_number': revision_number, 'lnode': "", 'count': 0}
94
- response = self.graph.invoke(config)
95
- return response["draft"], response["lnode"], response["count"], response.get("critique", ""), response.get("research_queries", [])
96
-
97
- def continue_agent(self, topic, revision_number, max_revisions, last_node, current_draft):
98
- config = {'task': topic, 'max_revisions': max_revisions, 'revision_number': revision_number, 'lnode': last_node, 'draft': current_draft, 'count': 0}
99
- response = self.graph.invoke(config)
100
- return response["draft"], response["lnode"], response["count"], response.get("critique", ""), response.get("research_queries", [])
101
-
 
 
 
 
 
 
 
 
102
  def create_interface(self):
103
  with gr.Blocks() as demo:
104
  with gr.Tabs():
105
  with gr.Tab("Agent"):
106
- topic_input = gr.Textbox(label="Essay Topic")
 
 
 
 
107
  last_node = gr.Textbox(label="Last Node", interactive=False)
108
  next_node = gr.Textbox(label="Next Node", interactive=False)
109
- thread = gr.Textbox(label="Thread", interactive=False)
110
  draft_rev = gr.Textbox(label="Draft Revision", interactive=False)
111
  count = gr.Textbox(label="Count", interactive=False)
 
112
  generate_button = gr.Button("Generate Essay", variant="primary")
113
  continue_button = gr.Button("Continue Essay")
114
-
115
- with gr.Row():
116
- gr.Markdown("**Manage Agent**")
117
  with gr.Row():
118
- output_text = gr.Textbox(label="Live Agent Output", interactive=False)
119
  with gr.Row():
120
  critique_text = gr.Textbox(label="Critique", interactive=False)
121
  with gr.Row():
122
  research_text = gr.Textbox(label="Research Queries", interactive=False)
123
-
124
- generate_button.click(fn=self.run_agent, inputs=[topic_input, gr.State(0), gr.State(2)], outputs=[output_text, last_node, next_node, critique_text, research_text])
125
- continue_button.click(fn=self.continue_agent, inputs=[topic_input, gr.State(0), gr.State(2), last_node, draft_rev], outputs=[output_text, last_node, next_node, critique_text, research_text])
126
-
 
 
 
 
 
 
 
127
  return demo
128
 
129
  def launch(self):
130
  self.demo.launch(share=True)
131
 
132
- # Run the App
133
- MultiAgent = Ewriter()
134
- app = WriterGui(MultiAgent.graph)
135
- app.launch()
 
 
 
 
1
  import warnings
2
  warnings.filterwarnings("ignore", message=".*TqdmWarning.*")
 
 
 
3
 
4
  from langgraph.graph import StateGraph, END
5
  from typing import TypedDict, Annotated, List
 
8
  from langchain_openai import ChatOpenAI
9
  from pydantic import BaseModel
10
  from tavily import TavilyClient
 
11
  import gradio as gr
12
 
13
+
14
+ # ----------------------
15
+ # Agent State Definition
16
+ # ----------------------
17
  class AgentState(TypedDict):
18
  task: str
19
  lnode: str
 
26
  max_revisions: int
27
  count: Annotated[int, operator.add]
28
 
29
+
30
class Queries(BaseModel):
    """Structured LLM output schema: a plain list of research query strings."""
    queries: List[str]
32
 
33
+
34
+ # ----------------------
35
+ # Writer Agent
36
+ # ----------------------
37
+ class Ewriter:
38
+ def __init__(self, openai_key: str, tavily_key: str):
39
+ if not openai_key or not tavily_key:
40
+ raise ValueError("⚠️ Both OpenAI and Tavily API keys must be provided.")
41
+
42
+ # Initialize models with user-provided keys
43
+ self.model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0, api_key=openai_key)
44
+ self.tavily = TavilyClient(api_key=tavily_key)
45
+
46
+ # Prompts
47
  self.PLAN_PROMPT = "You are an expert writer tasked with writing a high-level outline of a short 3-paragraph essay."
48
  self.RESEARCH_PROMPT = "Generate three research queries to help in writing an essay on the given topic."
49
  self.WRITER_PROMPT = "You are an essay assistant tasked with writing an excellent 3-paragraph essay."
50
  self.REFLECTION_PROMPT = "You are a teacher grading an essay. Provide critique and suggestions."
51
+
52
+ # Build the workflow graph
 
53
  builder = StateGraph(AgentState)
54
  builder.add_node("planner", self.plan_node)
55
  builder.add_node("research", self.research_node)
 
59
  builder.add_edge("planner", "research")
60
  builder.add_edge("research", "generate")
61
  builder.add_edge("generate", "reflect")
62
+ builder.add_edge("reflect", END)
63
+
64
  self.graph = builder.compile()
65
 
66
+ # ----------- Nodes -----------
67
  def plan_node(self, state: AgentState):
68
  try:
69
  response = self.model.invoke([SystemMessage(content=self.PLAN_PROMPT), HumanMessage(content=state['task'])])
70
  return {"plan": response.content, "lnode": "planner", "count": 1}
71
  except Exception as e:
72
+ return {"plan": f"Error in planning: {str(e)}", "lnode": "planner", "count": 0}
73
 
74
  def research_node(self, state: AgentState):
75
  try:
76
  response = self.model.invoke([SystemMessage(content=self.RESEARCH_PROMPT), HumanMessage(content=state['task'])])
77
  return {"research_queries": response.content.split('\n'), "lnode": "research", "count": 1}
78
  except Exception as e:
79
+ return {"research_queries": [f"Error in research: {str(e)}"], "lnode": "research", "count": 0}
80
 
81
  def generation_node(self, state: AgentState):
82
  try:
83
  response = self.model.invoke([SystemMessage(content=self.WRITER_PROMPT), HumanMessage(content=state['task'])])
84
  return {"draft": response.content, "lnode": "generate", "count": 1}
85
  except Exception as e:
86
+ return {"draft": f"Error in generation: {str(e)}", "lnode": "generate", "count": 0}
87
+
88
  def reflection_node(self, state: AgentState):
89
  try:
90
+ response = self.model.invoke([SystemMessage(content=self.REFLECTION_PROMPT), HumanMessage(content=state.get("draft", ""))])
91
  return {"critique": response.content, "lnode": "reflect", "count": 1}
92
  except Exception as e:
93
+ return {"critique": f"Error in reflection: {str(e)}", "lnode": "reflect", "count": 0}
94
 
95
+
96
+ # ----------------------
97
  # Gradio UI
98
+ # ----------------------
99
class WriterGui:
    """Gradio front-end for the essay-writer agent.

    API keys are collected from the user in the UI and a fresh ``Ewriter``
    (and its graph) is built per button click, so keys are never stored on
    the instance or in the environment.
    """

    def __init__(self):
        self.demo = self.create_interface()

    def run_agent(self, openai_key, tavily_key, topic, revision_number, max_revisions):
        """Run the full graph once for *topic*.

        Returns a 5-tuple ``(draft, last_node, count, critique,
        research_queries)`` matching the UI output components; any failure
        is reported in the draft slot instead of raising into Gradio.
        """
        try:
            agent = Ewriter(openai_key, tavily_key)
            config = {'task': topic, 'max_revisions': max_revisions, 'revision_number': revision_number, 'lnode': "", 'count': 0}
            response = agent.graph.invoke(config)
            return (
                response.get("draft", ""),
                response.get("lnode", ""),
                response.get("count", 0),
                response.get("critique", ""),
                response.get("research_queries", []),
            )
        except Exception as e:
            return f"❌ Error: {str(e)}", "", 0, "", []

    def continue_agent(self, openai_key, tavily_key, topic, revision_number, max_revisions, last_node, current_draft):
        """Re-run the graph seeded with the previous node name and draft.

        Same return contract as :meth:`run_agent`.
        """
        try:
            agent = Ewriter(openai_key, tavily_key)
            config = {'task': topic, 'max_revisions': max_revisions, 'revision_number': revision_number, 'lnode': last_node, 'draft': current_draft, 'count': 0}
            response = agent.graph.invoke(config)
            return (
                response.get("draft", ""),
                response.get("lnode", ""),
                response.get("count", 0),
                response.get("critique", ""),
                response.get("research_queries", []),
            )
        except Exception as e:
            return f"❌ Error: {str(e)}", "", 0, "", []

    def create_interface(self):
        """Build the Blocks UI and wire both buttons to the agent callbacks."""
        with gr.Blocks() as demo:
            with gr.Tabs():
                with gr.Tab("Agent"):
                    with gr.Row():
                        openai_input = gr.Textbox(label="🔑 OpenAI API Key", type="password", placeholder="Enter your OpenAI key")
                        tavily_input = gr.Textbox(label="🔑 Tavily API Key", type="password", placeholder="Enter your Tavily key")

                    topic_input = gr.Textbox(label="📘 Essay Topic")
                    last_node = gr.Textbox(label="Last Node", interactive=False)
                    next_node = gr.Textbox(label="Next Node", interactive=False)
                    draft_rev = gr.Textbox(label="Draft Revision", interactive=False)
                    count = gr.Textbox(label="Count", interactive=False)

                    generate_button = gr.Button("Generate Essay", variant="primary")
                    continue_button = gr.Button("Continue Essay")

                    with gr.Row():
                        output_text = gr.Textbox(label="Essay Draft", interactive=False)
                    with gr.Row():
                        critique_text = gr.Textbox(label="Critique", interactive=False)
                    with gr.Row():
                        research_text = gr.Textbox(label="Research Queries", interactive=False)

                    # BUG FIX: both callbacks return (draft, lnode, count,
                    # critique, research). The third value was previously
                    # wired to `next_node`, so "Next Node" displayed the
                    # count and the "Count" box never updated. Route it to
                    # `count` instead.
                    generate_button.click(
                        fn=self.run_agent,
                        inputs=[openai_input, tavily_input, topic_input, gr.State(0), gr.State(2)],
                        outputs=[output_text, last_node, count, critique_text, research_text]
                    )
                    continue_button.click(
                        fn=self.continue_agent,
                        inputs=[openai_input, tavily_input, topic_input, gr.State(0), gr.State(2), last_node, draft_rev],
                        outputs=[output_text, last_node, count, critique_text, research_text]
                    )
        return demo

    def launch(self):
        """Start the Gradio server with a public share link."""
        self.demo.launch(share=True)
159
 
160
+
161
# ----------------------
# Run App
# ----------------------
def _main() -> None:
    """Entry point: build the GUI and launch the Gradio server."""
    WriterGui().launch()


if __name__ == "__main__":
    _main()