Lui3ui3ui commited on
Commit
a2b27f5
·
verified ·
1 Parent(s): cb6cb9c

Upload agents.py

Browse files
Files changed (1) hide show
  1. agents.py +200 -13
agents.py CHANGED
@@ -1,13 +1,200 @@
1
- from huggingface_hub import InferenceClient
2
-
3
- client = InferenceClient(token=HF_API_TOKEN)
4
-
5
- try:
6
- response = client.text_generation(
7
- model="tiiuae/falcon-7b-instruct",
8
- prompt="Hello, who are you?",
9
- max_new_tokens=50
10
- )
11
- print(response)
12
- except Exception as e:
13
- print(f"Test failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langgraph.graph import StateGraph, END
2
+ from search import duckduckgo_search
3
+ import asyncio
4
+ import re
5
+ import json
6
+ import asyncio
7
+ import ast
8
+ from transformers import pipeline
9
+ import os
10
+ import torch
11
+
12
# Hugging Face access token read from the environment (None when unset).
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")

# Module-level text-generation pipeline: the model is downloaded/loaded once at
# import time and shared by every graph node via call_llama3().
# NOTE(review): falcon-7b-instruct is a 7B model — presumably this runs on a
# machine with enough RAM/GPU; confirm before deploying.
generator = pipeline("text-generation", model="tiiuae/falcon-7b-instruct", token=HF_API_TOKEN)
15
+
16
async def call_llama3(prompt):
    """Generate a completion for *prompt* with the module-level pipeline.

    Every call site in this file does ``await call_llama3(...)``, but the
    original definition was a plain synchronous function returning ``str`` —
    awaiting that raises ``TypeError: object str can't be used in 'await'
    expression`` on the first node run. Defining it as a coroutine fixes all
    callers; running the blocking, CPU-bound pipeline in a worker thread via
    ``asyncio.to_thread`` additionally keeps the event loop responsive.

    Args:
        prompt: Full text prompt for the model.

    Returns:
        The generated text (which, for HF text-generation pipelines,
        includes the prompt prefix).
    """
    # generator(...) blocks for the whole generation; off-load to a thread.
    output = await asyncio.to_thread(generator, prompt, max_new_tokens=500)
    return output[0]['generated_text']
19
+
20
class AsyncLogger:
    """Coroutine-safe accumulator for progress/debug messages.

    A single ``asyncio.Lock`` guards the message list so that concurrent
    tasks can append, read, or reset it without interleaving.
    """

    def __init__(self):
        self._entries = []
        self._lock = asyncio.Lock()

    async def log(self, message):
        """Append *message* to the accumulated log."""
        async with self._lock:
            self._entries.append(message)

    async def get_log(self):
        """Return every logged message, newline-joined, oldest first."""
        async with self._lock:
            return "\n".join(self._entries)

    async def clear(self):
        """Discard all accumulated messages."""
        async with self._lock:
            del self._entries[:]
36
+
37
+ logger = AsyncLogger()
38
+
39
def extract_json_array(text):
    """Return the first bracketed span of *text* that parses as JSON.

    Scans for the smallest ``[...]`` substrings (non-greedy, newlines
    allowed) and tries ``json.loads`` on each in order. Candidates that
    fail to decode are reported on stdout and skipped. Returns ``[]``
    when no candidate parses.
    """
    candidates = re.findall(r"(\[.*?\])", text, flags=re.DOTALL)
    for chunk in candidates:
        try:
            parsed = json.loads(chunk)
        except json.JSONDecodeError as e:
            print(f"json.loads error: {e}")
        else:
            return parsed
    return []
53
+
54
# Node 1: Extract books from user input
async def extract_books_node(state):
    """Graph node 1: pull {"title", "author"} pairs out of the user's text.

    Prompts the LLM for a JSON list of book dicts, parses the reply with
    extract_json_array, and returns it under ``extracted_books``. The shared
    logger is reset here because this node begins every graph run.
    """
    await logger.clear()  # fresh log for a new invocation

    request = (
        "Extract all book titles and authors from the following text. "
        "If an author is missing, fill it in using your knowledge. "
        "Output only a JSON list of dicts like this:\n"
        '[{"title": "...", "author": "..."}, ...]\n\n'
        f"User input: {state.get('user_input', '')}"
    )
    reply = await call_llama3(request)

    print("[extract_books_node] LLM raw response:", reply)
    await logger.log(f"[extract_books_node] LLM response: {reply}")

    parsed_books = extract_json_array(reply)
    if parsed_books:
        await logger.log(f"[extract_books_node] Extracted books: {parsed_books}")
    else:
        await logger.log("[extract_books_node] Failed to extract valid book list from LLM response.")

    print("[extract_books_node] Extracted books:", parsed_books)

    return {"extracted_books": parsed_books}
80
+
81
# Node 2
async def recommend_books_node(state):
    """Graph node 2: web-search for books similar to each extracted one.

    Runs one DuckDuckGo query per (title, author) pair, collects every hit
    as a {"title", "link", "snippet"} dict, and keeps a human-readable trace
    of each step under ``reasoning``.
    """
    source_books = state.get("extracted_books", [])
    trace = []
    found = []

    print("[recommend_books_node] Extracted books:", source_books)
    await logger.log(f"[recommend_books_node] Extracted books: {source_books}")

    # Nothing to search for — record the fact and bail out early.
    if not source_books:
        trace.append("No books extracted from the input. Check if the extraction failed.")
        return {"recommendations": [], "reasoning": "\n".join(trace)}

    for entry in source_books:
        query = f"Books similar to '{entry.get('title', '')}' by {entry.get('author', '')}"
        trace.append(f"Searching DuckDuckGo with query: {query}")

        print(f"[recommend_books_node] Searching with query: {query}")
        await logger.log(f"Searching DuckDuckGo with query: {query}")

        hits = await duckduckgo_search(query)

        if not hits:
            trace.append(f"No results found for: {query}")
            print(f"[recommend_books_node] No results found for query: {query}")
            await logger.log(f"No results found for query: {query}")
            continue

        print(f"[recommend_books_node] Results for query '{query}': {hits}")
        await logger.log(f"Results for query '{query}': {hits}")

        for hit in hits:
            found.append({
                "title": hit.get("title", "No Title"),
                "link": hit.get("link", ""),
                "snippet": hit.get("snippet", ""),
            })
            trace.append(f"✅ Found: {hit.get('title', 'No Title')} ({hit.get('link', '')})")

    if not found:
        trace.append("No recommendations found across all queries.")

    print("[recommend_books_node] Final recommendations:", found)
    await logger.log(f"Final recommendations: {found}")

    return {
        "recommendations": found,
        "reasoning": "\n".join(trace)
    }
132
+
133
# Node 3: Reason about the search results and generate recommendations
async def reasoning_node(state):
    """Graph node 3: have the LLM curate the raw search hits.

    Feeds every recommendation (title/link/snippet) back to the LLM, asks
    for a JSON list of picks with reasons, and appends a readable summary
    to the reasoning accumulated by the previous node.
    """
    hits = state.get("recommendations", [])
    reasoning_so_far = state.get("reasoning", "")

    # No hits to curate — note it and pass empty results through.
    if not hits:
        return {
            "final_recommendations": [],
            "final_reasoning": reasoning_so_far + "\nNo recommendations found to reason about.",
        }

    # Flatten the hit dicts into a plain-text listing for the prompt.
    hits_text = "\n".join(
        f"Title: {hit['title']}\nLink: {hit['link']}\nSnippet: {hit['snippet']}\n" for hit in hits
    )

    curation_prompt = (
        "You are a helpful book recommendation expert. You are given a web search result. "
        "Analyze it and select the most relevant book recommendations. Explain why you recommend each book. "
        "Output only a JSON list like this:\n"
        '[{"title": "...", "reason": "...", "link": "..."}, ...]\n\n'
        "Do not add any explanations, comments, or extra text. Only output the JSON list.\n\n"
        f"Books found from search:\n{hits_text}"
    )

    reply = await call_llama3(curation_prompt)

    print("[reasoning_node] LLM raw response:", reply)
    await logger.log(f"[reasoning_node] LLM response: {reply}")

    # Extract the JSON-like structure from the reply.
    picks = extract_json_array(reply)

    if picks:
        await logger.log(f"[reasoning_node] Final recommendations: {picks}")
    else:
        await logger.log("[reasoning_node] Failed to extract final recommendations from LLM response.")

    # Combine the previous reasoning with one summary line per pick.
    summary = reasoning_so_far + "\n\nFinal reasoning:\n" + "".join(
        f"✅ Recommended: {pick.get('title', 'Unknown')} - {pick.get('reason', 'No reason provided.')}\n"
        for pick in picks
    )

    print("[reasoning_node] Final recommendations extracted:", picks)
    print("[reasoning_node] Final reasoning:\n", summary)
    await logger.log(f"[reasoning_node] Final recommendations extracted: {picks}")
    await logger.log(f"[reasoning_node] Final reasoning:\n{summary}")

    return {
        "final_recommendations": picks,
        "final_reasoning": summary
    }
184
+
185
+
186
# Build the graph
def build_graph():
    """Assemble and compile the linear three-node LangGraph pipeline.

    Flow: extract_books -> recommend_books -> reasoning -> END, with
    extract_books as the entry point. Returns the compiled graph.
    """
    workflow = StateGraph(dict)

    # Register the three nodes.
    for node_name, node_fn in (
        ("extract_books", extract_books_node),
        ("recommend_books", recommend_books_node),
        ("reasoning", reasoning_node),
    ):
        workflow.add_node(node_name, node_fn)

    # Wire them in a straight line.
    workflow.add_edge("extract_books", "recommend_books")
    workflow.add_edge("recommend_books", "reasoning")
    workflow.add_edge("reasoning", END)

    workflow.set_entry_point("extract_books")
    return workflow.compile()