AlexFoxalt committed on
Commit
01e95d0
Β·
1 Parent(s): 7e348fd

Init commit session 6

Browse files
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
4
+ # Editor-based HTTP Client requests
5
+ /httpRequests/
6
+ # Datasource local storage ignored files
7
+ /dataSources/
8
+ /dataSources.local.xml
.idea/Multi-Agent-App.iml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="jdk" jdkName="Python 3.13 virtualenv at ~/PycharmProjects/ai_bootcamp/AIE5-DeployPythonicRAG/.venv" jdkType="Python SDK" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ </module>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.13 virtualenv at ~/PycharmProjects/ai_bootcamp/AIE5-DeployPythonicRAG/.venv" project-jdk-type="Python SDK" />
4
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/Multi-Agent-App.iml" filepath="$PROJECT_DIR$/.idea/Multi-Agent-App.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="" vcs="Git" />
5
+ </component>
6
+ </project>
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.13
Dockerfile ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Get a distribution that has uv already installed
3
+ FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim
4
+
5
+ # Add user - this is the user that will run the app
6
+ # If you do not set user, the app will run as root (undesirable)
7
+ RUN useradd -m -u 1000 user
8
+ USER user
9
+
10
+ # Set the home directory and path
11
+ ENV HOME=/home/user \
12
+ PATH=/home/user/.local/bin:$PATH
13
+
14
+ ENV UVICORN_WS_PROTOCOL=websockets
15
+
16
+
17
+ # Set the working directory
18
+ WORKDIR $HOME/app
19
+
20
+ # Copy the app to the container
21
+ COPY --chown=user . $HOME/app
22
+
23
+ # Install the dependencies
24
+ # RUN uv sync --frozen
25
+ RUN uv sync
26
+
27
+ # Expose the port
28
+ EXPOSE 7860
29
+
30
+ # Run the app
31
+ CMD ["uv", "run", "chainlit", "run", "app/app.py", "--host", "0.0.0.0", "--port", "7860"]
app/__init__.py ADDED
File without changes
app/app.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from operator import itemgetter
3
+ from langchain_core.runnables.base import RunnableSerializable
4
+ import chainlit as cl
5
+ import aiofiles
6
+ from langchain_community.vectorstores import Qdrant
7
+ from langchain_core.prompts import ChatPromptTemplate
8
+ from langchain.schema.output_parser import StrOutputParser
9
+ from langgraph.errors import GraphRecursionError
10
+
11
+ import messages as msg
12
+ from parsers import ParsersMap
13
+ from splitters import TextSplitter
14
+ from models import EmbeddingLLM, MiniLLM
15
+ from prompts import RAG_PROMPT
16
+ from constructors import (
17
+ construct_research_graph,
18
+ construct_authoring_graph,
19
+ construct_super_graph,
20
+ construct_correctness_graph,
21
+ )
22
+ from utils import enter_chain
23
+
24
+
25
@cl.on_chat_start
async def init_chat():
    """Bootstrap a chat session: ingest an uploaded file and build the agent graph.

    Steps (mirrored in the 4-step status message shown to the user):
      1. Ask for a .txt/.pdf upload and parse it to plain text.
      2. Split the text into token-sized chunks.
      3. Load the chunks into an in-memory Qdrant collection.
      4. Wire the RAG chain into the research/authoring/correctness sub-graphs
         and stash the compiled super-graph in the user session for process_chat.
    """
    files = None

    # Wait for the user to upload a file (AskFileMessage returns None on timeout)
    while files is None:
        files = await cl.AskFileMessage(
            content=msg.INIT_MSG,
            accept=["text/plain", "application/pdf"],
        ).send()

    file = files[0]
    # Pick the parser by file extension (".txt" or ".pdf")
    _, ext = os.path.splitext(file.name)
    parser = ParsersMap[ext]

    async with aiofiles.open(file.path, "rb") as f:
        file_content = await f.read()

    file_text = parser.load(file_content)
    status_msg_text = """
    Processing...please wait
    [1/4] Read file: {read_status}
    [2/4] Chunk file: {split_status}
    [3/4] Load file to DB: {load_status}
    [4/4] Build graph: {graph_status}
    """
    status_msg = cl.Message(
        status_msg_text.format(
            read_status="🟒", split_status="πŸ”Ή", load_status="πŸ”Ή", graph_status="πŸ”Ή"
        )
    )
    await status_msg.send()

    documents = TextSplitter.split_text(file_text)
    status_msg.content = status_msg_text.format(
        read_status="🟒", split_status="🟒", load_status="πŸ”Ή", graph_status="πŸ”Ή"
    )
    # update(), not send(): re-sending an already-sent Chainlit message posts a
    # duplicate instead of editing the existing status line (the later steps
    # already use update() — this makes step 2 consistent with them).
    await status_msg.update()

    db = Qdrant.from_texts(
        documents,
        EmbeddingLLM,
        location=":memory:",  # per-session, throwaway vector store
        collection_name="multi-agent-chatbot",
    )
    status_msg.content = status_msg_text.format(
        read_status="🟒", split_status="🟒", load_status="🟒", graph_status="πŸ”Ή"
    )
    retriever = db.as_retriever()
    await status_msg.update()

    # Plain RAG chain: retrieve context for the question, answer with MiniLLM.
    rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
    chain = (
        {
            "context": itemgetter("question") | retriever,
            "question": itemgetter("question"),
        }
        | rag_prompt
        | MiniLLM
        | StrOutputParser()
    )
    # Each team graph is entered through enter_chain, which adapts a raw string
    # into the {"messages": [...]} state the graphs expect.
    research_chain = (
        enter_chain | construct_research_graph(research_chain=chain).compile()
    )
    authoring_chain = enter_chain | construct_authoring_graph().compile()
    correctness_chain = enter_chain | construct_correctness_graph().compile()
    super_chain = (
        enter_chain
        | construct_super_graph(
            research_chain, authoring_chain, correctness_chain
        ).compile()
    )

    cl.user_session.set("chain", super_chain)
    status_msg.content = status_msg_text.format(
        read_status="🟒", split_status="🟒", load_status="🟒", graph_status="🟒"
    )
    await status_msg.update()

    await cl.Message(msg.ASK_FOR_QUERY_MSG).send()
105
+
106
+
107
@cl.on_message
async def process_chat(message: cl.Message):
    """Route an incoming user message through the compiled super-graph.

    Streams each intermediate graph state back to the UI as its own message;
    the hard recursion limit guards against a supervisor loop that never
    emits FINISH, which langgraph reports as GraphRecursionError.
    """
    chain: RunnableSerializable = cl.user_session.get("chain")
    try:
        async for step in chain.astream(message.content, {"recursion_limit": 30}):
            # "__end__" marks the terminal state; nothing useful to show there.
            if "__end__" not in step:
                await cl.Message(step).send()
    except GraphRecursionError:
        await cl.Message("Max recursion depth reached").send()
app/constructors.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+
3
+ from langchain.agents import AgentExecutor, create_openai_functions_agent
4
+ from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
5
+ from langchain.tools import Tool
6
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
+ from langchain_core.runnables.base import RunnableSerializable
8
+ from langchain_openai import ChatOpenAI
9
+ from langgraph.graph import END, StateGraph
10
+
11
+ from models import TurboLLM, MiniLLM
12
+ from states import ResearchTeamState, DocWritingState, State, CorrectnessState
13
+ from tools import (
14
+ TaviliTool,
15
+ write_document,
16
+ edit_document,
17
+ read_document,
18
+ create_outline,
19
+ )
20
+ from utils import agent_node, prelude, get_last_message, join_graph
21
+
22
+
23
def construct_agent(
    llm: ChatOpenAI, tools: list, system_prompt: str, members: list
) -> AgentExecutor:
    """Build an OpenAI function-calling AgentExecutor for one team worker.

    A shared collaboration preamble is appended to the caller-supplied system
    prompt, and the team roster is pre-bound into the {team_members} slot.
    """
    collaboration_suffix = (
        "\nWork autonomously according to your specialty, using the tools available to you."
        " Do not ask for clarification."
        " Your other team members (and other teams) will collaborate with you with their own specialties."
        " You are chosen for a reason! You are one of the following team members: {team_members}."
    )
    template = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt + collaboration_suffix),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    template = template.partial(team_members=", ".join(members))
    worker = create_openai_functions_agent(llm, tools, template)
    return AgentExecutor(agent=worker, tools=tools)
46
+
47
+
48
def construct_supervisor(
    llm: ChatOpenAI, system_prompt: str, members: list
) -> RunnableSerializable:
    """LLM-based router: picks the next worker (or FINISH) via a forced function call."""
    options = ["FINISH"] + members
    # JSON-schema function the model is forced to call, so its output always
    # parses to {"next": <one of options>}.
    route_schema = {
        "name": "route",
        "description": "Select the next role.",
        "parameters": {
            "title": "routeSchema",
            "type": "object",
            "properties": {
                "next": {
                    "title": "Next",
                    "anyOf": [
                        {"enum": options},
                    ],
                },
            },
            "required": ["next"],
        },
    }
    routing_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="messages"),
            (
                "system",
                "Given the conversation above, who should act next?"
                " Or should we FINISH? Select one of: {options}",
            ),
        ]
    ).partial(options=str(options), team_members=", ".join(members))
    routing_llm = llm.bind_functions(functions=[route_schema], function_call="route")
    return routing_prompt | routing_llm | JsonOutputFunctionsParser()
87
+
88
+
89
def construct_research_graph(research_chain: RunnableSerializable) -> StateGraph:
    """Assemble the research-team graph.

    A supervisor routes between two workers: a RAG node over the uploaded
    file ("FileDataRetriever") and a Tavily web-search node ("Search").
    NOTE(review): "TaviliTool" looks like a typo for Tavily; renaming it
    would touch other modules, so it is kept as-is here.
    """
    team = ["Search", "FileDataRetriever"]

    rag_tool = Tool(
        name="RetrieveInformationTool",
        func=lambda query: research_chain.invoke({"question": query}),
        description="Use Retrieval Augmented Generation to retrieve information from the file provided by user.",
    )
    retriever_agent = construct_agent(
        MiniLLM,
        [rag_tool],
        "You are a research assistant who can provide specific information on the provided by user file."
        "You must only respond with information about the paper related to the request.",
        team,
    )
    retriever_node = functools.partial(
        agent_node, agent=retriever_agent, name="FileDataRetriever"
    )

    web_search_agent = construct_agent(
        MiniLLM,
        [TaviliTool],
        "You are a research assistant who can search for up-to-date info using the Tavily search engine.",
        team,
    )
    web_search_node = functools.partial(agent_node, agent=web_search_agent, name="Search")

    supervisor = construct_supervisor(
        TurboLLM,
        (
            "You are a supervisor tasked with managing a conversation between the following workers:\n"
            "{team_members}\n"
            "Given the following user request, determine the subject to be researched and respond with the worker to act next.\n"
            "Each worker will perform a task and respond with their results and status.\n"
            "You should never ask your team to do anything beyond research. They are not required to write content or posts."
            "You should only pass tasks to workers that are specifically research focused.\n"
            "In most cases you always should use FileDataRetriever, "
            "because it's core feature of app that will provide the most useful context.\n"
            "When finished, respond with FINISH."
        ),
        team,
    )

    graph = StateGraph(ResearchTeamState)
    graph.add_node("Search", web_search_node)
    graph.add_node("FileDataRetriever", retriever_node)
    graph.add_node("supervisor", supervisor)
    # Every worker reports back to the supervisor after acting.
    for worker_name in team:
        graph.add_edge(worker_name, "supervisor")
    graph.add_conditional_edges(
        "supervisor",
        lambda x: x["next"],
        {name: name for name in team} | {"FINISH": END},
    )
    graph.set_entry_point("supervisor")
    return graph
146
+
147
+
148
def construct_authoring_graph() -> StateGraph:
    """Build the LinkedIn-post authoring team graph.

    Four workers (DocWriter, NoteTaker, CopyEditor, DopenessEditor) share a
    scratch directory; a supervisor routes between them until it emits FINISH.
    Each worker is wrapped with ``prelude`` so its prompt sees the current
    file listing through the {current_files} slot.
    """
    # Drafts the post body into files in the working directory.
    doc_writer_agent = construct_agent(
        MiniLLM,
        [write_document, edit_document, read_document],
        (
            "You are an expert writing technical LinkedIn posts.\n"
            "Below are files currently in your directory:\n{current_files}"
        ),
        ["DocWriter", "NoteTaker", "DopenessEditor", "CopyEditor"],
    )
    context_aware_doc_writer_agent = prelude | doc_writer_agent
    doc_writing_node = functools.partial(
        agent_node, agent=context_aware_doc_writer_agent, name="DocWriter"
    )

    # Produces the outline / working notes the writer builds on.
    note_taking_agent = construct_agent(
        MiniLLM,
        [create_outline, read_document],
        (
            "You are an expert senior researcher tasked with writing a LinkedIn post outline and"
            " taking notes to craft a LinkedIn post.\n{current_files}"
        ),
        ["DocWriter", "NoteTaker", "DopenessEditor", "CopyEditor"],
    )
    context_aware_note_taking_agent = prelude | note_taking_agent
    note_taking_node = functools.partial(
        agent_node, agent=context_aware_note_taking_agent, name="NoteTaker"
    )

    # Grammar / spelling / tone pass over the draft.
    copy_editor_agent = construct_agent(
        MiniLLM,
        [write_document, edit_document, read_document],
        (
            "You are an expert copy editor who focuses on fixing grammar, spelling, and tone issues\n"
            "Below are files currently in your directory:\n{current_files}"
        ),
        ["DocWriter", "NoteTaker", "DopenessEditor", "CopyEditor"],
    )
    context_aware_copy_editor_agent = prelude | copy_editor_agent
    copy_editing_node = functools.partial(
        agent_node, agent=context_aware_copy_editor_agent, name="CopyEditor"
    )

    # Punches up the draft's style (emojis etc.).
    dopeness_editor_agent = construct_agent(
        MiniLLM,
        [write_document, edit_document, read_document],
        (
            "You are an expert in dopeness, litness, coolness, etc - you edit the document to make sure it's dope. Make sure to use a number of emojis."
            "Below are files currently in your directory:\n{current_files}"
        ),
        ["DocWriter", "NoteTaker", "DopenessEditor", "CopyEditor"],
    )
    context_aware_dopeness_editor_agent = prelude | dopeness_editor_agent
    dopeness_node = functools.partial(
        agent_node, agent=context_aware_dopeness_editor_agent, name="DopenessEditor"
    )

    supervisor = construct_supervisor(
        TurboLLM,
        (
            "You are a supervisor tasked with managing a conversation between the"
            " following workers: {team_members}. You should always verify the technical"
            " contents after any edits are made. "
            "Given the following user request,"
            " respond with the worker to act next. Each worker will perform a"
            " task and respond with their results and status. When each team is finished,"
            " you must respond with FINISH."
        ),
        ["DocWriter", "NoteTaker", "DopenessEditor", "CopyEditor"],
    )

    graph = StateGraph(DocWritingState)
    graph.add_node("DocWriter", doc_writing_node)
    graph.add_node("NoteTaker", note_taking_node)
    graph.add_node("CopyEditor", copy_editing_node)
    graph.add_node("DopenessEditor", dopeness_node)
    graph.add_node("supervisor", supervisor)

    # All workers return control to the supervisor after each step.
    graph.add_edge("DocWriter", "supervisor")
    graph.add_edge("NoteTaker", "supervisor")
    graph.add_edge("CopyEditor", "supervisor")
    graph.add_edge("DopenessEditor", "supervisor")

    graph.add_conditional_edges(
        "supervisor",
        lambda x: x["next"],
        {
            "DocWriter": "DocWriter",
            "NoteTaker": "NoteTaker",
            "CopyEditor": "CopyEditor",
            "DopenessEditor": "DopenessEditor",
            "FINISH": END,
        },
    )

    graph.set_entry_point("supervisor")
    return graph
245
+
246
+
247
def construct_correctness_graph() -> StateGraph:
    """Build the correctness-review team graph.

    Three reviewers (StyleChecker, EthicChecker, FactChecker) edit the draft
    in the shared working directory under a routing supervisor. Each agent is
    wrapped with ``prelude`` so its prompt sees the directory listing via
    {current_files}.
    """
    # Checks the draft fits the target platform's theme/style.
    style_agent = prelude | construct_agent(
        MiniLLM,
        [edit_document, read_document],
        (
            "You are an expert in analyzing LinkedIn posts.\n"
            "Please verify the produced paper fits the theme and style of selected social media platform.\n"
            "Make edits to the file to make it perfect for posting on social media.\n"
            "Below are files currently in your directory:\n"
            "{current_files}\n"
        ),
        ["StyleChecker", "EthicChecker", "FactChecker"],
    )
    style_node = functools.partial(agent_node, agent=style_agent, name="StyleChecker")
    # Checks the draft against platform rules / potential offense.
    ethic_agent = prelude | construct_agent(
        MiniLLM,
        [edit_document, read_document],
        (
            "You are an expert in analyzing LinkedIn posts.\n"
            "Please verify the produced paper does not violate the platform rules, "
            "does not offend anyone and will not cause moral damage to anyone\n"
            "Make edits to the file to make it perfect for posting on social media.\n"
            "Below are files currently in your directory:\n"
            "{current_files}\n"
        ),
        ["StyleChecker", "EthicChecker", "FactChecker"],
    )
    ethic_node = functools.partial(agent_node, agent=ethic_agent, name="EthicChecker")
    # Fact-checks claims, with web search available as a tool.
    fact_agent = prelude | construct_agent(
        MiniLLM,
        [edit_document, read_document, TaviliTool],
        (
            "You are an expert in analyzing LinkedIn posts.\n"
            "Please verify the produced paper corresponds to reality, "
            "there is no false or distorted information, "
            "that there is no obvious slander.\n"
            "For fact-checking, use the Tavili search engine, "
            "to which you have access in the form of a tool.\n"
            "Make edits to the file to make it perfect for posting on social media.\n"
            "Below are files currently in your directory:\n"
            "{current_files}\n"
        ),
        ["StyleChecker", "EthicChecker", "FactChecker"],
    )
    fact_node = functools.partial(agent_node, agent=fact_agent, name="FactChecker")
    supervisor = construct_supervisor(
        TurboLLM,
        (
            "You are a supervisor tasked with managing a conversation between the following workers: {team_members}.\n"
            "You should always verify the technical contents after any edits are made.\n"
            "Try to use the maximum number of workers, because each of them significantly affects the quality of "
            "the generated response and the rule 'The More, The Better' works here, "
            "so if you are not sure which workers to choose, choose all of them\n"
            "Given the following user request, respond with the worker to act next.\n"
            "Each worker will perform a task and respond with their results and status.\n"
            "When each team is finished, you must respond with FINISH.\n"
        ),
        ["StyleChecker", "EthicChecker", "FactChecker"],
    )
    graph = StateGraph(CorrectnessState)
    graph.add_node("StyleChecker", style_node)
    graph.add_node("EthicChecker", ethic_node)
    graph.add_node("FactChecker", fact_node)
    graph.add_node("supervisor", supervisor)

    # Every reviewer hands control back to the supervisor after acting.
    graph.add_edge("StyleChecker", "supervisor")
    graph.add_edge("EthicChecker", "supervisor")
    graph.add_edge("FactChecker", "supervisor")

    graph.add_conditional_edges(
        "supervisor",
        lambda x: x["next"],
        {
            "StyleChecker": "StyleChecker",
            "EthicChecker": "EthicChecker",
            "FactChecker": "FactChecker",
            "FINISH": END,
        },
    )

    graph.set_entry_point("supervisor")
    return graph
329
+
330
+
331
def construct_super_graph(
    research_chain: RunnableSerializable,
    authoring_chain: RunnableSerializable,
    correctness_chain: RunnableSerializable,
) -> StateGraph:
    """Top-level graph: a supervisor dispatching between the three team sub-graphs.

    Each team node is bridged with get_last_message (state -> str going in)
    and join_graph (sub-graph reply -> single-message state coming out).
    """
    team_chains = {
        "Research team": research_chain,
        "LinkedIn team": authoring_chain,
        "Correctness team": correctness_chain,
    }
    supervisor_node = construct_supervisor(
        TurboLLM,
        "You are a supervisor tasked with managing a conversation between the"
        " following teams: {team_members}. Given the following user request,"
        " respond with the worker to act next. Each worker will perform a"
        " task and respond with their results and status. When all workers are finished,"
        " you must respond with FINISH.",
        list(team_chains),
    )
    super_graph = StateGraph(State)
    for team_name, team_chain in team_chains.items():
        super_graph.add_node(team_name, get_last_message | team_chain | join_graph)
    super_graph.add_node("supervisor", supervisor_node)

    # Every team reports back to the supervisor.
    for team_name in team_chains:
        super_graph.add_edge(team_name, "supervisor")

    super_graph.add_conditional_edges(
        "supervisor",
        lambda x: x["next"],
        {
            "LinkedIn team": "LinkedIn team",
            "Research team": "Research team",
            "Correctness team": "Correctness team",
            "FINISH": END,
        },
    )
    super_graph.set_entry_point("supervisor")
    return super_graph
app/messages.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ INIT_MSG = """
2
+ Hello!
3
+ Please upload the file and I will make you a cool template for a social media post based on the file's contents!
4
+ It may take some time to process the file, so please be patient.
5
+ """
6
+
7
+ ASK_FOR_QUERY_MSG = """
8
+ Cool, now provide me any topic from this file and I'll return file with social media post.
9
+ You can also specify exact social media name and I'll optimize the post.
10
+ Example:
11
+ `The topic is: "Why AI will conquer the world" and social media is Twitter`
12
+ """
app/models.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ from langchain_openai.embeddings import OpenAIEmbeddings
3
+
4
# Shared model singletons, instantiated once at import time.
EmbeddingLLM = OpenAIEmbeddings(model="text-embedding-3-small")  # vector-store embeddings
MiniLLM = ChatOpenAI(model="gpt-4o-mini")  # worker-agent model
TurboLLM = ChatOpenAI(model="gpt-4-turbo")  # supervisor/routing model
app/parsers.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod, ABC
2
+ from io import BytesIO
3
+
4
+ import PyPDF2
5
+
6
+
7
class BaseParser(ABC):
    """Interface for turning an uploaded file's raw bytes into plain text."""

    @abstractmethod
    def load(self, data: bytes) -> str:
        """Return the textual content extracted from *data*."""
        ...
11
+
12
+
13
class TxtParser(BaseParser):
    """Parser for plain-text (.txt) uploads."""

    def load(self, data: bytes) -> str:
        # Undecodable byte sequences are dropped rather than raising.
        return str(data, "utf-8", "ignore")
16
+
17
+
18
class PdfParser(BaseParser):
    """Parser for PDF (.pdf) uploads, using PyPDF2 text extraction."""

    def load(self, data: bytes) -> str:
        """Extract and concatenate the text of every page in *data*."""
        pdf_reader = PyPDF2.PdfReader(BytesIO(data))
        # Iterate pages directly instead of indexing range(len(...)), and
        # join once instead of repeated string concatenation.
        return "".join(page.extract_text() for page in pdf_reader.pages)
27
+
28
+
29
# Maps a file extension (including the dot) to a ready-to-use parser instance.
ParsersMap: dict[str, BaseParser] = {".txt": TxtParser(), ".pdf": PdfParser()}
app/prompts.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ RAG_PROMPT = """
2
+ CONTEXT:
3
+ {context}
4
+
5
+ QUERY:
6
+ {question}
7
+
8
+ You are a helpful assistant that creates social media posts based on the provided context (RAG).
9
+ Use the available context to generate response.
10
+ If there is no context please say: "Nothing found in source file".
11
+ """
app/splitters.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
2
+ from utils import tiktoken_len
3
+
4
+
5
# Shared splitter: ~300-token chunks measured with the gpt-4o-mini tokenizer,
# with no overlap between consecutive chunks.
TextSplitter = RecursiveCharacterTextSplitter(
    chunk_size=300,
    chunk_overlap=0,
    length_function=tiktoken_len,
)
app/states.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+ from typing import TypedDict
3
+
4
+ from langchain_core.messages import BaseMessage
5
+ from typing import Annotated, List
6
+
7
+
8
class ResearchTeamState(TypedDict):
    """LangGraph state for the research team."""

    # Conversation so far; operator.add makes node returns append, not replace.
    messages: Annotated[List[BaseMessage], operator.add]
    team_members: List[str]  # worker names available to the supervisor
    next: str  # node chosen by the supervisor (or "FINISH")
12
+
13
+
14
class DocWritingState(TypedDict):
    """LangGraph state for the authoring (doc-writing) team."""

    # Conversation so far; operator.add makes node returns append, not replace.
    messages: Annotated[List[BaseMessage], operator.add]
    # NOTE(review): typed str here but List[str] in ResearchTeamState — confirm intended.
    team_members: str
    next: str  # node chosen by the supervisor (or "FINISH")
    current_files: str  # working-directory listing injected by utils.prelude
19
+
20
+
21
class CorrectnessState(TypedDict):
    """LangGraph state for the correctness-review team."""

    # Conversation so far; operator.add makes node returns append, not replace.
    messages: Annotated[List[BaseMessage], operator.add]
    next: str  # node chosen by the supervisor (or "FINISH")
24
+
25
+
26
class State(TypedDict):
    """LangGraph state for the top-level super-graph."""

    # Conversation so far; operator.add makes node returns append, not replace.
    messages: Annotated[List[BaseMessage], operator.add]
    next: str  # team chosen by the supervisor (or "FINISH")
app/tools.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Annotated, List, Dict, Optional
2
+
3
+ from langchain.tools import tool
4
+ from langchain_community.tools.tavily_search import TavilySearchResults
5
+
6
+ from utils import WORKING_DIRECTORY
7
+
8
+
9
+ TaviliTool = TavilySearchResults(max_results=3)
10
+
11
+
12
@tool
def create_outline(
    points: Annotated[List[str], "List of main points or sections."],
    file_name: Annotated[str, "File path to save the outline."],
) -> Annotated[str, "Path of the saved outline file."]:
    """Create and save an outline."""
    target = WORKING_DIRECTORY / file_name
    with target.open("w") as outline_file:
        # One numbered line per point: "1. ...", "2. ...", ...
        outline_file.writelines(
            f"{number}. {point}\n" for number, point in enumerate(points, start=1)
        )
    return f"Outline saved to {file_name}"
22
+
23
+
24
@tool
def read_document(
    file_name: Annotated[str, "File path of the document to read."],
    start: Annotated[Optional[int], "The start line. Default is 0"] = None,
    end: Annotated[Optional[int], "The end line. Default is None"] = None,
) -> str:
    """Read the specified document, optionally limited to a line range.

    Returns lines ``start`` (0-indexed, inclusive) through ``end`` (exclusive).
    """
    with (WORKING_DIRECTORY / file_name).open("r") as file:
        lines = file.readlines()
    # Bug fix: the original tested `if start is not None: start = 0`, which
    # discarded any caller-supplied start and always read from the beginning.
    if start is None:
        start = 0
    # NOTE(review): readlines() keeps trailing newlines, so joining with "\n"
    # double-spaces the output — confirm whether "".join was intended.
    return "\n".join(lines[start:end])
36
+
37
+
38
@tool(return_direct=True)
def write_document(
    content: Annotated[str, "Text content to be written into the document."],
    file_name: Annotated[str, "File path to save the document."],
) -> Annotated[str, "Path of the saved document file."]:
    """Create and save a text document."""
    # write_text opens in "w" mode, replacing any existing content.
    (WORKING_DIRECTORY / file_name).write_text(content)
    return f"Document saved to {file_name}"
47
+
48
+
49
@tool
def edit_document(
    file_name: Annotated[str, "Path of the document to be edited."],
    inserts: Annotated[
        Dict[int, str],
        "Dictionary where key is the line number (1-indexed) and value is the text to be inserted at that line.",
    ] = None,
) -> Annotated[str, "Path of the edited document file."]:
    """Edit a document by inserting text at specific line numbers."""
    target = WORKING_DIRECTORY / file_name
    with target.open("r") as source:
        lines = source.readlines()

    # Apply insertions in ascending line order, so each earlier insert shifts
    # the positions that later inserts refer to.
    for line_number, text in sorted((inserts or {}).items()):
        if not (1 <= line_number <= len(lines) + 1):
            # Out-of-range insert aborts without writing anything back.
            return f"Error: Line number {line_number} is out of range."
        lines.insert(line_number - 1, text + "\n")

    with target.open("w") as destination:
        destination.writelines(lines)

    return f"Document edited and saved to {file_name}"
app/utils.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import uuid
3
+ from pathlib import Path
4
+ import tiktoken
5
+ from langchain_core.messages import HumanMessage
6
+
7
+ from states import State
8
+
9
+ os.makedirs("./data", exist_ok=True)
10
+
11
+
12
def tiktoken_len(text):
    """Return the number of tokens in *text* per the gpt-4o-mini tokenizer.

    The tiktoken encoding is built once and cached on the function itself:
    the original rebuilt it via ``encoding_for_model`` on every call, which
    is wasteful since this runs once per chunk during text splitting.
    """
    encoding = getattr(tiktoken_len, "_encoding", None)
    if encoding is None:
        encoding = tiktoken.encoding_for_model("gpt-4o-mini")
        tiktoken_len._encoding = encoding
    return len(encoding.encode(text))
17
+
18
+
19
def agent_node(state, agent, name):
    """Run *agent* on the current graph state and wrap its output as a named message."""
    output_text = agent.invoke(state)["output"]
    return {"messages": [HumanMessage(content=output_text, name=name)]}
22
+
23
+
24
def enter_chain(message: str) -> dict:
    """Adapt a raw user string into the message-list state a team graph expects."""
    return {"messages": [HumanMessage(content=message)]}
29
+
30
+
31
def get_last_message(state: State) -> str:
    """Return the text content of the most recent message in the graph state."""
    *_, last = state["messages"]
    return last.content
33
+
34
+
35
def join_graph(response: dict):
    """Collapse a sub-graph's reply to just its final message for the parent graph."""
    final_message = response["messages"][-1]
    return {"messages": [final_message]}
37
+
38
+
39
def create_random_subdirectory():
    """Create ./data/<8 random hex chars> and return its path as a string."""
    random_id = uuid.uuid4().hex[:8]  # 8 hex chars of a UUID4 — plenty of entropy here
    subdirectory_path = os.path.join("./data", random_id)
    os.makedirs(subdirectory_path, exist_ok=True)
    return subdirectory_path
44
+
45
+
46
def prelude(state):
    """Inject a 'current_files' listing of WORKING_DIRECTORY into the state."""
    if not WORKING_DIRECTORY.exists():
        WORKING_DIRECTORY.mkdir()
    try:
        discovered = [
            path.relative_to(WORKING_DIRECTORY)
            for path in WORKING_DIRECTORY.rglob("*")
        ]
    except Exception:
        # Best-effort: an unreadable directory is treated as "no files".
        discovered = []
    if not discovered:
        return {**state, "current_files": "No files written."}
    listing = "\n".join(f" - {path}" for path in discovered)
    return {
        **state,
        "current_files": "\nBelow are files your team has written to the directory:\n"
        + listing,
    }
63
+
64
+
65
+ WORKING_DIRECTORY = Path(create_random_subdirectory())
chainlit.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! πŸš€πŸ€–
2
+
3
+ Hi there, Developer! πŸ‘‹ We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4
+
5
+ ## Useful Links πŸ”—
6
+
7
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) πŸ“š
8
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! πŸ’¬
9
+
10
+ We can't wait to see what you create with Chainlit! Happy coding! πŸ’»πŸ˜Š
11
+
12
+ ## Welcome screen
13
+
14
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
data/4fd7e547/Mu_Shu_Pork_Recipe.md ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 🌟 Exciting Culinary Experience: Mu Shu Pork Recipe! 🌟
2
+
3
+ Have you ever tried making Mu Shu Pork at home? If not, now is the perfect time to roll up your sleeves and dive into this delicious dish! 🀀 Here’s a simple yet mouth-watering recipe to bring a taste of Chinese cuisine to your kitchen.
4
+
5
+ **Ingredients:**
6
+ - 1/2 lb pork tenderloin, cut into 1/4-inch strips
7
+
8
+ **Marinade:**
9
+ - 2 tsps soy sauce
10
+ - 1/2 tsp cornstarch
11
+ - 1/8 tsp salt
12
+ - 1/2 tsp dry sherry
13
+ - 1/4 tsp sugar
14
+
15
+ **Cooking Sauce:**
16
+ - 2 tsps soy sauce
17
+ - 2 tsps dry sherry
18
+ - 1 tsp sesame oil
19
+ - 1/2 tsp sugar
20
+
21
+ **Other Essentials:**
22
+ - 10 6-inch flour tortillas
23
+ - 2 1/2 tsps cooking oil
24
+ - 2 eggs, beaten with 1/4 tsp salt
25
+ - 1 1/2 oz vermicelli, soaked and cut into 2-inch pieces
26
+ - 1 large tree ear mushroom, soaked and thinly sliced
27
+ - 4 medium Chinese dried mushrooms, soaked and thinly sliced
28
+ - 3 whole green onions, shredded
29
+ - 1/2 cup red bell pepper, shredded
30
+ - 1/2 cup bamboo shoots, cut into matchstick pieces
31
+ - Hoisin sauce for serving
32
+
33
+ **Instructions:**
34
+ 1. Combine marinade ingredients with the pork and let it marinate.
35
+ 2. Wrap tortillas in foil and warm in a 350Β°F oven for 10 minutes.
36
+ 3. Heat a skillet with oil, cook the beaten eggs into a thin pancake, remove, and cut into strips.
37
+
38
+ Enjoy your homemade Mu Shu Pork! βœ…πŸ½οΈβœ¨
39
+
40
+ Let me know how yours turns out! #Cooking #Recipe #MuShuPork #Foodie
41
+
42
+ Feel free to share your thoughts and any variations you try! 🍜πŸ₯’
data/4fd7e547/Mu_Shu_Pork_Recipe_Outline.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ 1. Introduction to Mu Shu Pork
2
+ 2. Ingredients List
3
+ 3. Marinade Components
4
+ 4. Cooking Sauce Ingredients
5
+ 5. Essential Cooking Items
6
+ 6. Step-by-Step Instructions
7
+ 7. Serving Suggestions
8
+ 8. Call-to-Action
pyproject.toml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "06-multi-agent-with-langgraph"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.13"
7
+ dependencies = [
8
+ "aiofiles>=23.2.1",
9
+ "arxiv>=2.1.3",
10
+ "chainlit>=2.1.1",
11
+ "jupyter>=1.1.1",
12
+ "langchain>=0.3.17",
13
+ "langchain-community>=0.3.16",
14
+ "langchain-core>=0.3.33",
15
+ "langchain-openai>=0.3.3",
16
+ "langgraph>=0.2.69",
17
+ "pymupdf>=1.25.2",
18
+ "pypdf2>=3.0.1",
19
+ "qdrant-client>=1.13.2",
20
+ "tiktoken>=0.8.0",
21
+ ]
uv.lock ADDED
The diff for this file is too large to render. See raw diff