DechowWen committed on
Commit
577a2bb
·
verified ·
1 Parent(s): 9570ac3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -22
app.py CHANGED
@@ -1,33 +1,74 @@
 
1
  import gradio as gr
2
- import random
3
- from smolagents import GradioUI, CodeAgent, HfApiModel
4
 
5
- # Import our custom tools from their modules
6
- from tools import DuckDuckGoSearchTool, WeatherInfoTool, HubStatsTool
7
- from retriever import load_guest_dataset
 
 
 
 
8
 
9
- # Initialize the Hugging Face model
10
- model = HfApiModel()
11
 
12
- # Initialize the web search tool
13
- search_tool = DuckDuckGoSearchTool()
14
 
15
- # Initialize the weather tool
16
- weather_info_tool = WeatherInfoTool()
17
 
18
- # Initialize the Hub stats tool
19
- hub_stats_tool = HubStatsTool()
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- # Load the guest dataset and initialize the guest info tool
22
- guest_info_tool = load_guest_dataset()
 
 
23
 
24
- # Create Alfred with all the tools
25
- alfred = CodeAgent(
26
- tools=[guest_info_tool, weather_info_tool, hub_stats_tool, search_tool],
27
- model=model,
28
- add_base_tools=True, # Add any additional base tools
29
- planning_interval=3 # Enable planning every 3 steps
 
 
 
 
 
 
 
 
30
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  if __name__ == "__main__":
33
- GradioUI(alfred).launch()
 
 
 
 
 
 
1
+ import os
2
  import gradio as gr
 
 
3
 
4
+ from typing import TypedDict, Annotated
5
+ from langgraph.graph.message import add_messages
6
+ from langchain_core.messages import AnyMessage, AIMessage, HumanMessage
7
+ from langgraph.prebuilt import ToolNode
8
+ from langgraph.graph import START, StateGraph
9
+ from langgraph.prebuilt import tools_condition
10
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
11
 
12
+ from tools import DuckDuckGoSearchRun, weather_info_tool, hub_stats_tool
13
+ from retriever import guest_info_tool
14
 
15
# Hugging Face API token from the environment (set HF_TOKEN before launching;
# None here means requests to the endpoint go out unauthenticated).
hf_token = os.getenv("HF_TOKEN")

# Initialize the web search tool.
search_tool = DuckDuckGoSearchRun()

# Build a tool-enabled chat interface backed by a HF Inference endpoint.
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    huggingfacehub_api_token=hf_token,
)

chat = ChatHuggingFace(llm=llm, verbose=True)
# Every tool the agent may call; this same list feeds the graph's ToolNode below.
tools = [guest_info_tool, search_tool, weather_info_tool, hub_stats_tool]
# Chat model bound to the tools so it can emit structured tool calls.
chat_with_tools = chat.bind_tools(tools)
30
# Define the AgentState and the agent graph.
class AgentState(TypedDict):
    """State passed between graph nodes.

    The ``add_messages`` reducer makes node updates APPEND to the message
    history rather than replace it.
    """
    messages: Annotated[list[AnyMessage], add_messages]
33
 
34
def assistant(state: AgentState):
    """LLM node: run the tool-enabled chat model over the conversation so far.

    Returns a partial state update; the ``add_messages`` reducer appends the
    model's reply (which may contain tool calls) to ``state["messages"]``.
    """
    reply = chat_with_tools.invoke(state["messages"])
    return {"messages": [reply]}
38
 
39
## Build the agent graph.
builder = StateGraph(AgentState)

# Nodes: the units that perform the actual work.
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Edges: control how execution flows between nodes.
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message contains tool calls, route to the "tools" node;
    # otherwise finish and return the response directly.
    tools_condition,
)
builder.add_edge("tools", "assistant")
alfred = builder.compile()
56
+
57
def predict(message, history):
    """Gradio chat callback: run the agent and return its final answer.

    Converts the Gradio openai-style ``history`` (dicts with "role"/"content")
    into LangChain messages, appends the new user ``message``, invokes the
    compiled graph, and returns the content of the last message produced.
    Entries whose role is neither "user" nor "assistant" are skipped.
    """
    role_to_message = {"user": HumanMessage, "assistant": AIMessage}
    lc_history = [
        role_to_message[turn["role"]](content=turn["content"])
        for turn in history
        if turn["role"] in role_to_message
    ]
    lc_history.append(HumanMessage(content=message))
    result = alfred.invoke({"messages": lc_history})
    return result["messages"][-1].content
67
 
68
if __name__ == "__main__":
    # Serve the agent through a Gradio chat UI; type="messages" makes the
    # history arrive as openai-style role/content dicts, as predict() expects.
    gr.ChatInterface(predict, type="messages").launch()