rahul-02 committed on
Commit
ebbf92b
·
verified ·
1 Parent(s): 7a2948d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -61
app.py CHANGED
@@ -1,62 +1,61 @@
1
- import os
2
- from flask import Flask, render_template, request, jsonify
3
- from dotenv import load_dotenv
4
-
5
- # USE THE CLASSIC BRIDGE FOR 2026 COMPATIBILITY
6
- from langchain_classic.agents import AgentExecutor, create_react_agent
7
- from langchain_classic.tools import Tool
8
- from langchain_huggingface import HuggingFaceEndpoint
9
- from langchain.memory import ConversationBufferMemory
10
- from langchain import hub
11
- from duckduckgo_search import DDGS
12
-
13
- load_dotenv()
14
- app = Flask(__name__)
15
-
16
- HF_TOKEN = os.getenv("HF_TOKEN")
17
-
18
- # LLM Setup
19
- llm = HuggingFaceEndpoint(
20
- repo_id="meta-llama/Llama-3.2-3B-Instruct",
21
- huggingfacehub_api_token=HF_TOKEN,
22
- temperature=0.7,
23
- provider="hf-inference"
24
- )
25
-
26
- # Search Tool
27
- def custom_ddg_search(query: str):
28
- try:
29
- with DDGS() as ddgs:
30
- results = list(ddgs.text(query, max_results=3))
31
- return "\n".join([f"{r['title']}: {r['body']}" for r in results])
32
- except:
33
- return "Search error. Try again."
34
-
35
- tools = [
36
- Tool(name="web_search", func=custom_ddg_search, description="Search the web.")
37
- ]
38
-
39
- # Initialize Agent via Classic methods
40
- prompt = hub.pull("hwchase17/react")
41
- agent = create_react_agent(llm, tools, prompt)
42
- memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
43
-
44
- agent_executor = AgentExecutor(
45
- agent=agent,
46
- tools=tools,
47
- memory=memory,
48
- handle_parsing_errors=True
49
- )
50
-
51
- @app.route("/")
52
- def index():
53
- return render_template("index.html")
54
-
55
- @app.route("/ask", methods=["POST"])
56
- def ask():
57
- user_query = request.json.get("query")
58
- result = agent_executor.invoke({"input": user_query})
59
- return jsonify({"answer": result["output"]})
60
-
61
- if __name__ == "__main__":
62
  app.run(host="0.0.0.0", port=7860)
 
1
+ import os
2
+ from flask import Flask, render_template, request, jsonify
3
+ from dotenv import load_dotenv
4
+
5
+ from langchain_classic.agents import AgentExecutor, create_react_agent
6
+ from langchain_classic.tools import Tool
7
+ from langchain_huggingface import HuggingFaceEndpoint
8
+ from langchain_classic.memory import ConversationBufferMemory
9
+ from langchain import hub
10
+ from duckduckgo_search import DDGS
11
+
12
# Pull environment variables (HF_TOKEN, etc.) from a local .env file.
load_dotenv()
app = Flask(__name__)

# Hugging Face API token; may be None when the env var is unset — the
# endpoint client will then fail at request time rather than here.
HF_TOKEN = os.getenv("HF_TOKEN")

# LLM setup: remote Llama 3.2 3B Instruct served through the
# "hf-inference" provider on the Hugging Face inference platform.
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    huggingfacehub_api_token=HF_TOKEN,
    temperature=0.7,
    provider="hf-inference",
)
24
+
25
+ # Search Tool
26
# Search Tool
def custom_ddg_search(query: str) -> str:
    """Search DuckDuckGo and return up to three results as "title: body" lines.

    Errors are reported as a plain string instead of raising, so the ReAct
    agent receives a usable observation and can continue reasoning.
    """
    try:
        with DDGS() as ddgs:
            results = list(ddgs.text(query, max_results=3))
        if not results:
            # An empty observation string tends to stall the agent;
            # say explicitly that nothing came back.
            return "No results found."
        return "\n".join(f"{r['title']}: {r['body']}" for r in results)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any search/parse failure still degrades
        # gracefully to a retry hint.
        return "Search error. Try again."
33
+
34
# Expose the DuckDuckGo search function to the agent as its single tool.
tools = [
    Tool(
        name="web_search",
        func=custom_ddg_search,
        description="Search the web.",
    )
]

# Initialize the agent via the classic API: pull the standard ReAct
# prompt from the LangChain hub and wire it to the LLM and tools.
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)

# Conversation history is kept in-process under the "chat_history" key.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    # Recover from malformed LLM output instead of aborting the run.
    handle_parsing_errors=True,
)
49
+
50
@app.route("/")
def index():
    """Serve the chat UI page."""
    return render_template("index.html")
53
+
54
@app.route("/ask", methods=["POST"])
def ask():
    """Answer a user query via the ReAct agent.

    Expects a JSON body {"query": "..."} and returns {"answer": "..."}.
    Responds 400 on a missing/non-JSON body or an empty query instead of
    letting `request.json` raise (non-JSON content type) or passing
    `None` into the agent, either of which surfaced as a 500.
    """
    payload = request.get_json(silent=True) or {}
    user_query = payload.get("query")
    if not user_query:
        return jsonify({"error": "Missing 'query' in request body."}), 400
    result = agent_executor.invoke({"input": user_query})
    return jsonify({"answer": result["output"]})
59
+
60
if __name__ == "__main__":
    # Bind on all interfaces; 7860 is the Hugging Face Spaces default port.
    app.run(host="0.0.0.0", port=7860)