rahul-02 commited on
Commit
b78bb65
·
verified ·
1 Parent(s): 61077b3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -55
app.py CHANGED
@@ -1,62 +1,18 @@
1
  import os
2
  from flask import Flask, render_template, request, jsonify
3
  from flask_cors import CORS
4
- from langchain_huggingface import HuggingFaceEndpoint
5
- # THE STABLE IMPORTS - NO MORE MISSING NAMES
6
- from langchain.agents import AgentExecutor, create_react_agent
7
- from langchain_core.tools import Tool
8
- from langchain_core.prompts import PromptTemplate
9
- from duckduckgo_search import DDGS
10
 
11
  app = Flask(__name__)
12
  CORS(app)
13
 
14
- # 1. Setup LLM
15
- sec_token = os.getenv("HF_TOKEN")
16
- llm = HuggingFaceEndpoint(
17
- repo_id="meta-llama/Llama-3.2-3B-Instruct",
18
- huggingfacehub_api_token=sec_token,
19
  )
20
 
21
- # 2. Manual Search
22
- def manual_search(query: str):
23
- try:
24
- with DDGS() as ddgs:
25
- results = [r['body'] for r in ddgs.text(query, max_results=3)]
26
- return "\n".join(results) if results else "No results found."
27
- except Exception as e:
28
- return f"Search error: {str(e)}"
29
-
30
- tools = [
31
- Tool(
32
- name="Search",
33
- func=manual_search,
34
- description="Useful for finding current info on the web."
35
- )
36
- ]
37
-
38
- # 3. Explicit Prompt Template (This is required for create_react_agent)
39
- template = """Answer the following questions:
40
- {tools}
41
- Format:
42
- Question: {input}
43
- Thought: {agent_scratchpad}
44
- Action: the action to take, should be one of [{tool_names}]
45
- Action Input: the input to the action
46
- Observation: the result of the action
47
- ... (repeat)
48
- Final Answer: the final answer
49
-
50
- Begin!
51
- Question: {input}
52
- Thought: {agent_scratchpad}"""
53
-
54
- prompt = PromptTemplate.from_template(template)
55
-
56
- # 4. Initialize Agent
57
- agent = create_react_agent(llm, tools, prompt)
58
- agent_executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True)
59
-
60
  @app.route('/')
61
  def index():
62
  return render_template('index.html')
@@ -65,12 +21,25 @@ def index():
65
  def ask():
66
  try:
67
  data = request.get_json()
68
- query = data.get("query")
69
- # Run agent
70
- result = agent_executor.invoke({"input": query})
71
- return jsonify({"answer": result["output"]})
 
 
 
 
 
 
 
 
 
 
 
72
  except Exception as e:
73
- return jsonify({"answer": f"System Error: {str(e)}"}), 500
 
 
74
 
75
  if __name__ == "__main__":
76
  app.run(host="0.0.0.0", port=7860)
 
1
  import os
2
  from flask import Flask, render_template, request, jsonify
3
  from flask_cors import CORS
4
+ from huggingface_hub import InferenceClient
 
 
 
 
 
5
 
6
app = Flask(__name__)
CORS(app)  # allow cross-origin requests so a browser front-end on another origin can call /ask

# 1. Initialize the Official Client
# This uses your HF_TOKEN secret automatically
# NOTE(review): assumes the HF_TOKEN env var is set in the deployment
# environment; if it is missing, token=None and gated models like
# Llama-3.2 will reject the request — confirm the secret is configured.
client = InferenceClient(
    model="meta-llama/Llama-3.2-3B-Instruct",
    token=os.getenv("HF_TOKEN")
)
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
@app.route('/')
def index():
    """Serve the chat front-end page."""
    template_name = 'index.html'
    return render_template(template_name)
 
21
def ask():
    """Forward a chat query to the Llama endpoint and return its answer.

    Expects a JSON body of the form ``{"query": "..."}`` and responds with
    ``{"answer": "..."}``.  Returns 400 when the query is missing or empty,
    and 500 (with the upstream error text) when the inference call fails.
    """
    try:
        # silent=True: a malformed/non-JSON body yields None instead of
        # raising, so we can answer with a clean 400 rather than a 500.
        data = request.get_json(silent=True) or {}
        user_query = data.get("query")
        if not user_query:
            return jsonify({"answer": "No query provided."}), 400

        # 2. Simple, Direct Call to Llama — stream the completion and
        # collect the token deltas; joining a list avoids quadratic
        # string concatenation on long answers.
        parts = []
        for message in client.chat_completion(
            messages=[{"role": "user", "content": user_query}],
            max_tokens=500,
            stream=True,
        ):
            token = message.choices[0].delta.content
            if token:
                parts.append(token)

        return jsonify({"answer": "".join(parts)})

    except Exception as e:
        # flush=True so the error reaches the container log immediately.
        # This will tell us EXACTLY if the token is the problem.
        print(f"Error: {str(e)}", flush=True)
        return jsonify({"answer": f"System Status: {str(e)}"}), 500
43
 
44
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the default port expected by
    # Hugging Face Spaces containers.
    app.run(host="0.0.0.0", port=7860)