shan gao committed on
Commit
7592d9f
·
1 Parent(s): c9c06ab

Add application file

Browse files
Files changed (2) hide show
  1. app.py +233 -0
  2. requirement.txt +8 -0
app.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ import gradio as gr
4
+ import datasets
5
+ from typing import List, Tuple
6
+
7
+ # LangChain / LangGraph imports
8
+ from langchain_core.documents import Document
9
+ from langchain_community.tools import DuckDuckGoSearchRun
10
+ from langchain_huggingface import HuggingFaceEmbeddings
11
+ from langchain_community.vectorstores import FAISS
12
+ from langchain.tools import Tool
13
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, ToolMessage
14
+ from typing import TypedDict, Annotated
15
+ from langgraph.graph.message import add_messages
16
+ from langgraph.prebuilt import ToolNode
17
+ from langgraph.graph import START, StateGraph
18
+ from langgraph.prebuilt import tools_condition
19
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
20
+
21
def set_token_hfhub(value):
    """Persist the token entered in the UI into the process environment.

    Downstream Hugging Face clients read the ``HF_TOKEN`` env var, so storing
    it here makes the user-supplied token visible to them.
    """
    os.environ.update(HF_TOKEN=value)
23
+
24
# ==============================
# 1) Build the same agent (Alfred)
# ==============================

# Load the gala guest list from the Hub and turn each record into a LangChain
# Document — one guest per document, with the guest's name duplicated into
# metadata so it survives retrieval.
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")
docs = [
    Document(
        page_content="\n".join(
            [
                f"Name: {guest['name']}",
                f"Relation: {guest['relation']}",
                f"Description: {guest['description']}",
                f"Email: {guest['email']}",
            ]
        ),
        metadata={"name": guest["name"]},
    )
    for guest in guest_dataset
]

# Embeddings & Vectorstore retriever
# NOTE(review): embeddings are L2-normalized — presumably so similarity scores
# behave like cosine similarity in the FAISS index; confirm the index metric.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",
    encode_kwargs={"normalize_embeddings": True},
)
# In-memory FAISS index over the guest documents; the retriever returns the
# top-3 most similar documents for a query.
vectorstore = FAISS.from_documents(docs, embeddings)
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})
52
+
53
# Guest info tool
def extract_text(query: str) -> str:
    """Look up gala guests by name or relation and return their details as text."""
    matches = retriever.invoke(query)
    if not matches:
        return "No matching guest information found."
    return "\n\n".join(doc.page_content for doc in matches)
61
+
62
# Wrap the retriever lookup as a LangChain Tool so the chat model can call it
# by name; the description is what the model sees when deciding to use it.
guest_info_tool = Tool(
    name="guest_info_retriever",
    func=extract_text,
    description="Retrieves detailed information about gala guests based on their name or relation.",
)
67
+
68
# Web search tool — general-purpose DuckDuckGo search the agent can invoke.
search_tool = DuckDuckGoSearchRun()
70
+
71
# LLM endpoint token.
# Read via os.getenv (not os.environ[...]) so a missing variable yields None
# instead of raising KeyError — otherwise the guard below is dead code and the
# user sees an unhelpful KeyError rather than the intended message.
hf_token = os.getenv("HF_TOKEN")

if not hf_token:
    # NOTE(review): this raises at import time, before the UI's token textbox
    # (wired to set_token_hfhub) can ever be used — confirm that is intended.
    raise RuntimeError(
        "HF_TOKEN is not set. Please export it before running the app."
    )
79
+
80
# Chat model: a Hugging Face Inference endpoint wrapped in the chat interface,
# with both tools bound so the model can emit structured tool calls.
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    huggingfacehub_api_token=hf_token,
)
chat = ChatHuggingFace(llm=llm, verbose=True)
tools = [guest_info_tool, search_tool]
chat_with_tools = chat.bind_tools(tools)
87
+
88
# Agent state & node
class AgentState(TypedDict):
    # Conversation history. The `add_messages` reducer makes LangGraph append
    # returned messages to the existing list instead of replacing it.
    messages: Annotated[List[AnyMessage], add_messages]
91
+
92
def assistant(state: AgentState):
    """Run the tool-enabled chat model over the conversation so far.

    Returns a state delta with the single new assistant message, which may
    contain tool calls for the ToolNode to execute.
    """
    reply = chat_with_tools.invoke(state["messages"])
    return {"messages": [reply]}
95
+
96
# Graph wiring: START -> assistant; tools_condition routes to the "tools" node
# when the assistant's last message carries tool calls (otherwise the graph
# ends); tool output loops back to the assistant for the final answer.
builder = StateGraph(AgentState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)
builder.add_edge("tools", "assistant")
alfred = builder.compile()
104
+
105
+ # ======================================
106
+ # 2) Helper functions for the Gradio UI
107
+ # ======================================
108
+
109
def _msg_content_to_str(msg: AnyMessage) -> str:
    """Flatten a LangChain message's content into plain text for the Chatbot.

    Content is usually a plain string; it can also be a list of parts
    (e.g. tool-call traces), in which case the text parts are joined.
    Anything else is stringified as a last resort.
    """
    content = getattr(msg, "content", "")

    if isinstance(content, str):
        return content

    if isinstance(content, list):
        # Keep only plain strings and dict parts that carry a "text" field.
        pieces = [
            part["text"] if isinstance(part, dict) else part
            for part in content
            if isinstance(part, str) or (isinstance(part, dict) and "text" in part)
        ]
        return "\n".join(pieces) if pieces else str(content)

    return str(content)
131
+
132
def startup_state() -> List[AnyMessage]:
    """Return a fresh, empty agent message history."""
    return list()
135
+
136
# The Chatbot below is created with type="messages", so its history is a list
# of {"role": ..., "content": ...} dicts (not (user, bot) tuples).
def submit_user_message(
    user_text: str,
    chat_history: List[dict],
    agent_messages: List[AnyMessage],
):
    """
    Handle one user turn:
    1) Append HumanMessage to agent state
    2) Run Alfred
    3) Extract last AIMessage and append to chat_history

    Returns (textbox value, chat_history, agent_messages) matching the
    Gradio outputs [txt, chatbot, agent_state].
    """
    # Ignore empty submissions; gr.update() leaves the textbox unchanged.
    if not user_text or user_text.strip() == "":
        return gr.update(), chat_history, agent_messages

    # Step 1: add HumanMessage to state (copy first so the gr.State value
    # is not mutated in place).
    agent_messages = list(agent_messages or [])
    agent_messages.append(HumanMessage(content=user_text))

    # Step 2: run the graph
    out = alfred.invoke({"messages": agent_messages})

    # The graph returns a new messages list *including* the latest assistant/tool steps.
    # We use the last AIMessage as the displayed reply.
    new_msgs: List[AnyMessage] = out["messages"]
    agent_messages = new_msgs  # keep full state for the next turn

    # Find the last assistant message to show in the UI
    ai_text = ""
    for m in reversed(new_msgs):
        if isinstance(m, AIMessage):
            ai_text = _msg_content_to_str(m)
            break
    if not ai_text:
        # fallback: in rare cases of only tool messages, show a generic note
        ai_text = "I processed your request using my tools."

    # Append the turn to the visible history in "messages" dict format.
    chat_history = list(chat_history or [])
    chat_history.append({"role": "user", "content": user_text})
    chat_history.append({"role": "assistant", "content": ai_text})
    # "" clears the input textbox for the next message.
    return "", chat_history, agent_messages
176
+
177
def clear_chat():
    """Reset both the visible chatbot history and the hidden agent state."""
    fresh_history: list = []
    return fresh_history, startup_state()
180
+
181
# ========================
# 3) Gradio App UI layout
# ========================

with gr.Blocks(title="Alfred — LangGraph Agent") as demo:
    gr.Markdown(
        """
        # 🎩 Alfred — Your LangGraph Agent
        Ask questions and Alfred will respond, using:
        - a vector search tool over the guest list
        - DuckDuckGo web search
        """
    )

    # Token entry row. NOTE(review): the token is stored via set_token_hfhub
    # on submit (Enter), but the HF endpoint is constructed at import time —
    # confirm a token entered here actually takes effect.
    with gr.Row():
        token1 = gr.Textbox(
            label="Your hf token",
            autofocus=True,
            scale=2,
        )

    # Conversation display ("messages" format: role/content dicts).
    with gr.Row():
        chatbot = gr.Chatbot(
            label="Conversation",
            type="messages",
            height=500,
            show_copy_button=True,
            avatar_images=(None, None),  # customize if you like
        )

    # Input row: textbox + send/clear buttons.
    with gr.Row():
        txt = gr.Textbox(
            label="Your message",
            placeholder="Ask anything…",
            autofocus=True,
            scale=4,
        )
        send_btn = gr.Button("Send", variant="primary", scale=1)
        clear_btn = gr.Button("Clear")

    # Hidden state: the agent’s full message list (LangChain messages)
    agent_state = gr.State(startup_state())

    # Wire up events. Enter in `txt` and the Send button trigger the same
    # handler; Clear resets both the chatbot and the agent state.
    token1.submit(set_token_hfhub, [token1])
    txt.submit(submit_user_message, [txt, chatbot, agent_state], [txt, chatbot, agent_state])
    send_btn.click(submit_user_message, [txt, chatbot, agent_state], [txt, chatbot, agent_state])
    clear_btn.click(clear_chat, outputs=[chatbot, agent_state])

# Entry point
if __name__ == "__main__":
    # You can tweak server_name/port as needed
    demo.launch()
requirement.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ langchain
3
+ langgraph
4
+ langchain-community
5
+ langchain-huggingface
6
+ datasets
7
+ faiss-cpu
8
+ ddgs