"""LangGraph Agent"""
import os
from dotenv import load_dotenv
from langgraph.graph import START, END, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
import hashlib  # For hashing payloads and queries
import requests  # For HTTP requests to the scoring API
import time  # For timestamps in the query cache and ingest metadata
from code_agent import run_agent  # Compiled code-interpreter graph helper

from langfuse.langchain import CallbackHandler

# Initialize Langfuse CallbackHandler for LangGraph/Langchain (tracing)
try:
    langfuse_handler = CallbackHandler()
except Exception as e:
    print(f"Warning: Could not initialize Langfuse handler: {e}")
    langfuse_handler = None

# Load environment variables - try multiple files
load_dotenv()  # Try .env first
load_dotenv("env.local")  # Try env.local as backup

print(f"SUPABASE_URL loaded: {bool(os.environ.get('SUPABASE_URL'))}")
print(f"GROQ_API_KEY loaded: {bool(os.environ.get('GROQ_API_KEY'))}")

# ---------------------------------------------------------------------------
# Lightweight in-memory caches and constants for smarter retrieval/ingest
# ---------------------------------------------------------------------------
TTL = 300  # seconds – how long we keep similarity-search results
SIMILARITY_THRESHOLD = 0.85  # cosine score above which we assume we already know the answer
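# Note: `similarity_search_with_relevance_scores` normalizes scores to the
# [0, 1] range, so 0.85 reads as "very close to an already-stored question".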

# (query_hash -> (timestamp, results))
QUERY_CACHE: dict[str, tuple[float, list]] = {}
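# A cache entry for query q is keyed by hashlib.sha256(q.encode()).hexdigest()
# and counts as fresh for TTL seconds (see the retriever node below).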
# task IDs whose attachments we already attempted to download this session
PROCESSED_TASKS: set[str] = set()
# hash_ids of Q/A payloads we have already upserted during this session
SEEN_HASHES: set[str] = set()

# Base URL of the scoring API (duplicated here to avoid circular import with basic_agent)
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

@tool
def wiki_search(input: str) -> str:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        input: The search query."""
    try:
        search_docs = WikipediaLoader(query=input, load_max_docs=2).load()
        if not search_docs:
            return "No Wikipedia results found for the query."
        # Return a plain string, matching the declared return type
        formatted_search_docs = "\n\n---\n\n".join(
            f'<Document source="{doc.metadata.get("source", "Unknown")}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
            for doc in search_docs
        )
        return formatted_search_docs
    except Exception as e:
        print(f"Error in wiki_search: {e}")
        return f"Error searching Wikipedia: {e}"

@tool
def web_search(input: str) -> str:
    """Search Tavily for a query and return a maximum of 3 results.

    Args:
        input: The search query."""
    try:
        # TavilySearchResults.invoke expects the query string as its first
        # ("input") argument; passing it positionally avoids the earlier
        # `missing 1 required positional argument: 'input'` error.
        search_docs = TavilySearchResults(max_results=3).invoke(input)
        if not search_docs:
            return "No web search results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            f'<Document source="{doc.get("url", "Unknown")}">\n{doc.get("content", "No content")}\n</Document>'
            for doc in search_docs
        )
        return formatted_search_docs
    except Exception as e:
        print(f"Error in web_search: {e}")
        return f"Error searching web: {e}"

@tool
def arvix_search(input: str) -> str:
    """Search arXiv for a query and return a maximum of 3 results.

    Args:
        input: The search query."""
    try:
        search_docs = ArxivLoader(query=input, load_max_docs=3).load()
        if not search_docs:
            return "No arXiv results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            f'<Document source="{doc.metadata.get("source", "Unknown")}" page="{doc.metadata.get("page", "")}">\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        )
        return formatted_search_docs
    except Exception as e:
        print(f"Error in arvix_search: {e}")
        return f"Error searching arXiv: {e}"

@tool
def run_python(input: str) -> str:
    """Execute Python code in a restricted sandbox (code-interpreter).

    Pass **any** coding or file-manipulation task here and the agent will
    compute the answer by running Python. The entire standard library is NOT
    available; heavy networking is disabled. Suitable for: math, data-frames,
    small file parsing, algorithmic questions.
    """
    return run_agent(input)
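
# Hypothetical usage through the tool interface (assumes run_agent returns
# the sandbox's stdout as a string):
#   run_python.invoke("print(2 ** 10)")  # -> "1024"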

# load the system prompt from the file
with open("./prompts/system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message
sys_msg = SystemMessage(content=system_prompt)

# build a retriever
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") #  dim=768
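# The 768-dim embeddings must match the dimension of the vector column in
# the `documents` table configured below.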

# Try to create Supabase client with error handling
try:
    supabase_url = os.environ.get("SUPABASE_URL")
    supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")

    if not supabase_url or not supabase_key:
        print("Warning: Supabase credentials not found, vector store will be disabled")
        vector_store = None
        retriever_tool = None
    else:
        supabase: Client = create_client(supabase_url, supabase_key)
        vector_store = SupabaseVectorStore(
            client=supabase,
            embedding=embeddings,
            table_name="documents",
            query_name="match_documents_langchain",
        )
        # Name the result `retriever_tool` instead of rebinding the imported
        # `create_retriever_tool` function
        retriever_tool = create_retriever_tool(
            retriever=vector_store.as_retriever(),
            name="Question Search",
            description="A tool to retrieve similar questions from a vector store.",
        )
except Exception as e:
    print(f"Warning: Could not initialize Supabase vector store: {e}")
    vector_store = None
    retriever_tool = None
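# Note: SupabaseVectorStore assumes the `documents` table and the
# `match_documents_langchain` SQL function already exist in the Supabase
# project; creating them is part of the one-time database setup.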

tools = [
    wiki_search,
    web_search,
    arvix_search,
    run_python,
]
if retriever_tool:
    tools.append(retriever_tool)

# ---------------------------------------------------------------------------
# Code-interpreter integration helpers
# ---------------------------------------------------------------------------



def _needs_code(state: dict) -> bool:  # type: ignore[override]
    """Heuristic: does *state* look like a coding request?"""
    messages = state.get("messages", [])
    if not messages:
        return False
    last_content = str(messages[-1].content).lower()
    triggers = [
        "```python",
        "write python",
        "run this code",
        "file manipulation",
        "csv",
        "pandas",
        "json",
        "plot",
        "fibonacci",
    ]
    return any(t in last_content for t in triggers)
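
# Example: a message ending in "please write python to parse this csv" hits
# the "write python" and "csv" triggers, so the state is routed to the
# code-interpreter branch.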


def _code_exec_wrapper(state: dict):  # type: ignore[override]
    """Delegate the user query to the sandboxed Python interpreter."""
    # Join the content of every human message (the question plus any
    # attachment snippets added upstream by the retriever node)
    human_msgs = [m.content for m in state.get("messages", []) if m.type == "human"]
    query = "\n\n".join(human_msgs)

    # Execute code-interpreter with full context (question + attachments)
    result = run_agent(query)
    # Persist the raw stdout so we can convert it to an AI message downstream
    return {"code_result": result}


def _code_to_message(state: dict):  # type: ignore[override]
    """Turn the interpreter's stdout into an AIMessage so the LLM can see it."""
    if not state.get("code_result"):
        return {}
    return {"messages": [AIMessage(content=state["code_result"])]}

# ---------------------------------------------------------------------------
# Graph state and ingest node - writes back to the vector store when the
# `should_ingest` flag is set
# ---------------------------------------------------------------------------
class AgentState(MessagesState):
    """MessagesState plus the extra channels written by the custom nodes.

    Plain MessagesState only defines a `messages` channel, and LangGraph
    rejects node updates for undeclared keys, so `should_ingest` (set by the
    retriever node) and `code_result` (set by the code-exec wrapper) must be
    declared here.
    """
    should_ingest: bool
    code_result: str


def ingest(state: AgentState):
    """Persist helpful Q/A pairs (and any attachment snippet) to the vector DB."""
    try:
        if not state.get("should_ingest") or not vector_store:
            return {}

        question_text = state["messages"][0].content
        answer_text = state["messages"][-1].content
        attach_snippets = "\n\n".join(
            m.content for m in state["messages"] if str(m.content).startswith("Attached file content")
        )
        payload = f"Question:\n{question_text}\n\nAnswer:\n{answer_text}"
        if attach_snippets:
            payload += f"\n\n{attach_snippets}"

        hash_id = hashlib.sha256(payload.encode()).hexdigest()
        if hash_id in SEEN_HASHES:
            print("Ingest: Duplicate payload within session – skip")
            return {}
        SEEN_HASHES.add(hash_id)
        vector_store.add_texts([payload], metadatas=[{"hash_id": hash_id, "timestamp": time.time()}])
        print("Ingest: Stored new Q/A pair in vector store")
    except Exception as ing_e:
        print(f"Ingest node: Error while upserting – {ing_e}")
    return {}
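
# SEEN_HASHES only dedupes within a single process; after a restart the same
# payload can be ingested again (the stored hash_id metadata would support
# server-side dedupe, but that is not implemented here).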

# Build graph function
def build_graph(provider: str = "groq"):
    """Build the graph"""
    # Select the chat model for the requested provider
    if provider == "google":
        # Google Gemini
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        # Groq https://console.groq.com/docs/models
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0.6)  # alternative: gemma2-9b-it
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                # HuggingFaceEndpoint expects `endpoint_url`, not `url`
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
    # Bind tools to LLM
    llm_with_tools = llm.bind_tools(tools)

    # Node
    def assistant(state: AgentState):
        """Assistant node"""
        try:
            print(f"Assistant node: Processing {len(state['messages'])} messages")
            result = llm_with_tools.invoke(state["messages"])
            print(f"Assistant node: LLM returned result type: {type(result)}")
            return {"messages": [result]}
        except Exception as e:
            print(f"Error in assistant node: {e}")
            error_msg = AIMessage(content=f"I encountered an error: {e}")
            return {"messages": [error_msg]}
    
    def retriever(state: AgentState):
        """Retriever node (smart fetch + similarity search)"""
        try:
            print(f"Retriever node: Processing {len(state['messages'])} messages")
            if not state["messages"]:
                print("Retriever node: No messages in state")
                return {"messages": [sys_msg]}

            # Extract the *latest* user query content
            query_content = state["messages"][-1].content

            # ----------------------------------------------------------------------------------
            # Similarity search with an in-process cache
            # ----------------------------------------------------------------------------------
            q_hash = hashlib.sha256(query_content.encode()).hexdigest()
            now = time.time()
            if q_hash in QUERY_CACHE and now - QUERY_CACHE[q_hash][0] < TTL:
                similar_question = QUERY_CACHE[q_hash][1]
                print("Retriever node: Cache hit for similarity search")
            else:
                if vector_store:
                    print(f"Retriever node: Searching vector store for similar questions …")
                    try:
                        similar_question = vector_store.similarity_search_with_relevance_scores(query_content, k=2)
                    except Exception as vs_e:
                        print(f"Retriever node: Vector store search error – {vs_e}")
                        similar_question = []
                    QUERY_CACHE[q_hash] = (now, similar_question)
                else:
                    similar_question = []
                    print("Retriever node: Vector store not available, skipping similarity search")

            # Decide whether this exchange should later be ingested. The flag
            # is returned as a state update at the end of this node; mutating
            # `state` in place would not propagate through LangGraph.
            top_score = similar_question[0][1] if similar_question else 0.0
            should_ingest = top_score < SIMILARITY_THRESHOLD

            # ----------------------------------------------------------------------------------
            # Attachment fetch (only once per task_id during this session)
            # ----------------------------------------------------------------------------------
            attachment_msg = None
            matched_task_id = None
            try:
                resp = requests.get(f"{DEFAULT_API_URL}/questions", timeout=30)
                resp.raise_for_status()
                questions = resp.json()
                for q in questions:
                    if str(q.get("question")).strip() == str(query_content).strip():
                        matched_task_id = str(q.get("task_id"))
                        break
                if matched_task_id and matched_task_id not in PROCESSED_TASKS:
                    print(f"Retriever node: Downloading attachment for task {matched_task_id} …")
                    file_resp = requests.get(f"{DEFAULT_API_URL}/files/{matched_task_id}", timeout=60)
                    if file_resp.status_code == 200 and file_resp.content:
                        try:
                            file_text = file_resp.content.decode("utf-8", errors="replace")
                        except Exception:
                            file_text = "(binary or non-UTF8 file omitted)"
                        MAX_CHARS = 8000
                        if len(file_text) > MAX_CHARS:
                            file_text = file_text[:MAX_CHARS] + "\n… (truncated)"
                        attachment_msg = HumanMessage(content=f"Attached file content for task {matched_task_id}:\n```python\n{file_text}\n```")
                        print("Retriever node: Attachment added to context")
                        state["should_ingest"] = True  # ensure we store this new info
                    else:
                        print(f"Retriever node: No attachment for task {matched_task_id} (status {file_resp.status_code})")
                    PROCESSED_TASKS.add(matched_task_id)
            except Exception as api_e:
                print(f"Retriever node: Error while fetching attachment – {api_e}")

            # ----------------------------------------------------------------------------------
            # Build message list for downstream LLM
            # ----------------------------------------------------------------------------------
            msgs = [sys_msg] + state["messages"]
            if similar_question:
                example_doc = similar_question[0][0] if isinstance(similar_question[0], tuple) else similar_question[0]
                example_msg = HumanMessage(content=f"Here I provide a similar question and answer for reference: \n\n{example_doc.page_content}")
                msgs.append(example_msg)
                print("Retriever node: Added example message from similar question")

            if attachment_msg:
                msgs.append(attachment_msg)

            return {"messages": msgs}
        except Exception as e:
            print(f"Error in retriever node: {e}")
            return {"messages": [sys_msg] + state["messages"]}

    builder = StateGraph(AgentState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_node("code_exec", _code_exec_wrapper)
    builder.add_node("code_to_message", _code_to_message)
    builder.add_node("ingest", ingest)

    # Edge layout
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")

    def route_after_assistant(state: AgentState) -> str:
        """Single router for the assistant node.

        Registering two separate conditional-edge sets on "assistant" (one
        for tools_condition, one for _needs_code) would fire both on every
        turn, so the checks are combined: tool calls take priority, then the
        code-interpreter heuristic, and otherwise the exchange moves on to
        the ingest node.
        """
        if tools_condition(state) == "tools":
            return "tools"
        if _needs_code(state):
            return "code_exec"
        return "ingest"

    builder.add_conditional_edges(
        "assistant",
        route_after_assistant,
        {"tools": "tools", "code_exec": "code_exec", "ingest": "ingest"},
    )
    builder.add_edge("tools", "assistant")

    # Flow after code execution: inject the result then resume the chat
    builder.add_edge("code_exec", "code_to_message")
    builder.add_edge("code_to_message", "assistant")

    # Terminate after ingest so the graph has no dead-end node
    builder.add_edge("ingest", END)

    # Compile graph
    return builder.compile()

# test
if __name__ == "__main__":      
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Build the graph
    graph = build_graph(provider="groq")
    # Run the graph
    messages = [HumanMessage(content=question)]
    config = {"callbacks": [langfuse_handler]} if langfuse_handler else {}
    messages = graph.invoke({"messages": messages}, config=config)
    for m in messages["messages"]:
        m.pretty_print()