Upload 8 files
- app.py +64 -0
- docs/doc1.txt +20 -0
- docs/doc2.txt +18 -0
- docs/doc3.txt +20 -0
- docs/doc4.txt +20 -0
- docs/docs5.txt +22 -0
- retriever.py +121 -0
- tools.py +109 -0
app.py
ADDED
@@ -0,0 +1,64 @@
from smolagents import CodeAgent, InferenceClientModel, DuckDuckGoSearchTool
from tools import DocumentRetrievalTool, DocumentSummarizationTool, CodeExecutionTool
from retriever import load_document, chunk_text, embed_text, vector_store
import torch
from transformers import pipeline
import gradio as gr

# Load the document set, chunk it, and track the source file of every chunk
# so the retrieval tool can report real sources instead of "unknown".
docs = load_document("docs")
ids, chunks, metadatas = [], [], []
for doc in docs:
    for idx, chunk in enumerate(chunk_text(doc["content"])):
        ids.append(f"{doc['filename']}_chunk{idx}")
        chunks.append(chunk)
        metadatas.append({"source": doc["filename"], "chunk_index": idx})
embeddings = embed_text(chunks)

# Index everything in ChromaDB (vector_store creates the default collection when None is passed).
chroma_collection = vector_store(
    collection=None,
    ids=ids,
    documents=chunks,
    metadatas=metadatas,
    embeddings=embeddings
)

doc_tool = DocumentRetrievalTool(
    collection=chroma_collection
)

# float16 only works reliably on GPU; fall back to float32 on CPU.
device = 0 if torch.cuda.is_available() else -1
summarization_pipeline = pipeline(
    task="summarization",
    model="google/pegasus-xsum",
    torch_dtype=torch.float16 if device == 0 else torch.float32,
    device=device
)

summarize_tool = DocumentSummarizationTool(
    summarization_pipeline=summarization_pipeline
)

# model = InferenceClientModel("naveensharma16/my-agent-gaia-test")
model = InferenceClientModel()  # uses the default Hugging Face Inference API model

agent = CodeAgent(
    tools=[doc_tool, summarize_tool, DuckDuckGoSearchTool(), CodeExecutionTool()],
    model=model,
    stream_outputs=True
)

def agent_interface(query):
    return agent.run(query)

iface = gr.Interface(
    fn=agent_interface,
    inputs="text",
    outputs="text",
    title="Document QA Agent"
)

if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)
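Once the app is running, the Gradio endpoint can be exercised programmatically as well as through the UI. A minimal sketch using gradio_client, assuming the app is reachable on localhost:7860 and exposes Gradio's default /predict route:

from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
# Sends one query through agent_interface and prints the agent's answer.
result = client.predict("Summarize the best practices for RAG evaluation.", api_name="/predict")
print(result)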
docs/doc1.txt
ADDED
@@ -0,0 +1,20 @@
Agent & Tool-Based Automation Workflow – Using Retrieval & Generation

In modern automation systems, agents and tools work in tandem to bridge the gap between user intent and actionable workflows. An agent is the orchestrator—it interprets natural language requests, selects the appropriate tools, and supervises the execution of subtasks. Tools are the specialised components the agent invokes: for retrieval of knowledge, execution of code, external API calls, or summarisation of results. By separating these concerns, you build a flexible system: the agent handles reasoning and decision-making, while tools execute concrete operations.

Retrieval plays a pivotal role in this architecture. Before generation, the system identifies relevant context or data—perhaps past automation scripts, documentation, or process logs—using a retriever tool. That context is fed into the generation stage, where the agent or model crafts a response or action plan grounded in the retrieved evidence. This Retrieval-Augmented Generation (RAG) pattern ensures that the system doesn’t hallucinate its way to an answer but bases its output on real, indexed information.
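A minimal sketch of that retrieve-then-generate pattern in this repo's terms, where retrieve_query is real but llm_generate stands in for whatever text-generation call you use:

from retriever import retrieve_query

def answer(query, collection, llm_generate):
    # Retrieval: gather the most relevant chunks as evidence.
    evidence = retrieve_query(query, collection, top_k=3)
    context = "\n---\n".join(r["text"] for r in evidence)
    # Generation: ground the model's answer in the retrieved context.
    prompt = f"Answer using only this context:\n{context}\n\nQuestion: {query}"
    return llm_generate(prompt)  # llm_generate is a hypothetical stand-in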

Tool invocation adds another dimension of operational power. Once the agent decides “I need this tool”, it passes structured inputs to the tool, receives a result, and continues its reasoning with that result. For example: the user says “Create a script that uploads the latest financial report.” The agent retrieves the report template, invokes a code-generation tool, reviews the generated script via another tool, and then executes or delivers the final output. Each step is visible, modular, and auditable.
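In this repo's terms, that structured input/output contract is just a direct tool call; for instance, with the retrieval tool (assuming the chroma_collection built in app.py):

from tools import DocumentRetrievalTool

doc_tool = DocumentRetrievalTool(collection=chroma_collection)  # chroma_collection comes from app.py
result = doc_tool.forward(query="how should generated scripts be reviewed?")
print(result)  # the agent would continue reasoning with this result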

Finally, good workflows enforce guardrails, feedback loops and evaluation. The agent should know when to escalate to a human, how to log tool usage (for observability), and how to evaluate whether the output meets criteria (accuracy, relevance, correctness). Embedding these practices from the start allows the system to evolve—improving over time, scaling across domains, and adapting to new use-cases without breaking.
docs/doc2.txt
ADDED
@@ -0,0 +1,18 @@
Best Practices for Automated Documentation

In today’s fast-paced digital landscape, documentation is no longer a static “write once and forget” artifact—it must evolve dynamically and continuously alongside the systems it describes. Automated documentation systems help organisations maintain accuracy, consistency and accessibility, especially when handling high volumes of documents or frequent process changes. By establishing clear documentation standards, integrating automation tools into existing data stacks, leveraging metadata, and implementing version control, automated documentation becomes a powerful enabler of transparency and efficiency.

One of the key steps in successful documentation automation is selecting the right tools and aligning them to your organisational environment. For instance, you should choose software that seamlessly integrates with your database, document management system or ERP, supports the formats you use (e.g., PDF, HTML, Markdown) and offers robust versioning and audit trails. Further, good practice requires ongoing review and update cycles—automated doesn’t mean “never verify”. Regular audits, monitoring of documentation churn and verifying that the generated material reflects the live system are critical.

Automated documentation also benefits from thoughtful metadata design. By capturing context such as authorship, timestamps, document versions, relevant stakeholders, and workflows, your system supports better searchability, traceability and governance. Automation that’s aware of metadata reduces duplication and makes retrieval faster and more reliable. Finally, security and governance cannot be an afterthought. Handling documentation—especially in regulated domains or with sensitive information—requires access controls, encryption, audit logs and compliance with data-retention policies. Treating documentation as a critical asset, with clear ownership and management processes, ensures your automated approach remains robust.
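In this repo, retriever.py already records source and chunk_index for every chunk; a richer metadata record along the lines described above might look like the following (the extra fields are illustrative assumptions, not part of the current code):

chunk_metadata = {
    "source": "doc2.txt",        # stored by retriever.py today
    "chunk_index": 3,            # stored by retriever.py today
    "author": "docs-team",       # illustrative governance fields
    "version": "1.2",
    "last_updated": "2025-01-15",
}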
docs/doc3.txt
ADDED
@@ -0,0 +1,20 @@
AI Agent Workflows – An Introduction & Best Practices

Modern AI agents are more than chatbots—they are systems capable of autonomously performing tasks, connecting to tools, retrieving knowledge, and making decisions. At its core, an agent takes input (a user request), reasons about what to do (which tools to call or which data to fetch), executes operations, and returns output. This workflow involves: interpreting intent → selecting action → retrieving context/data → executing tools/commands → generating a response or action. By mapping out this sequence clearly, you build agents that are predictable and effective.
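That sequence maps onto a simple control loop. A minimal sketch, where SimpleTool and the retrieve/generate callables are illustrative rather than this repo's API:

from dataclasses import dataclass
from typing import Callable, List

@dataclass
class SimpleTool:
    name: str
    matches: Callable[[str], bool]   # should this tool handle the request?
    run: Callable[[str], str]        # execute and return a result string

def run_agent(request: str, tools: List[SimpleTool], retrieve_context, generate):
    context = retrieve_context(request)          # retrieve relevant evidence
    for tool in tools:                           # select and execute applicable tools
        if tool.matches(request):
            context += "\n" + tool.run(request)
    return generate(request, context)            # generate a grounded response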

One of the most important steps in agent workflows is retrieval of context. When an agent is asked a question or given a task, it often needs data beyond its training—such as documents, APIs, logs or databases. A retrieval component finds relevant pieces of evidence, which are then used by the generation or tool-execution stage. Without such context, the agent risks hallucinating or producing irrelevant output.

Equally important is tool invocation and orchestration. Once the agent has retrieved relevant context, it may decide to call a tool—for example, running a script, accessing a database, invoking an API, or performing a calculation. The workflow must clearly define how tools are selected, how inputs are formed, how outputs are handled, and how errors or unexpected results are managed. Modularising tools and defining interfaces make this step robust and maintainable.

Finally, building production-worthy agent workflows demands governance, observability and iterative improvement. Agents should log their decisions (which tool was called, what data was retrieved), monitor performance (latency, accuracy, failures) and allow for human intervention when needed. Best practice is also to start with narrow-scoped tasks (one reliable function) and expand gradually, ensuring each stage works before scaling. With these practices, your agent workflows become both reliable and extendable.
docs/doc4.txt
ADDED
@@ -0,0 +1,20 @@
AI Agent Workflows – An Introduction & Best Practices

Modern AI agents are more than chatbots—they are systems capable of autonomously performing tasks, connecting to tools, retrieving knowledge, and making decisions. At its core, an agent takes input (a user request), reasons about what to do (which tools to call or which data to fetch), executes operations, and returns output. This workflow involves: interpreting intent → selecting action → retrieving context/data → executing tools/commands → generating a response or action. By mapping out this sequence clearly, you build agents that are predictable and effective.

One of the most important steps in agent workflows is retrieval of context. When an agent is asked a question or given a task, it often needs data beyond its training—such as documents, APIs, logs or databases. A retrieval component finds relevant pieces of evidence, which are then used by the generation or tool-execution stage. Without such context, the agent risks hallucinating or producing irrelevant output.

Equally important is tool invocation and orchestration. Once the agent has retrieved relevant context, it may decide to call a tool—for example, running a script, accessing a database, invoking an API, or performing a calculation. The workflow must clearly define how tools are selected, how inputs are formed, how outputs are handled, and how errors or unexpected results are managed. Modularising tools and defining interfaces make this step robust and maintainable.

Finally, building production-worthy agent workflows demands governance, observability and iterative improvement. Agents should log their decisions (which tool was called, what data was retrieved), monitor performance (latency, accuracy, failures) and allow for human intervention when needed. Best practice is also to start with narrow-scoped tasks (one reliable function) and expand gradually, ensuring each stage works before scaling. With these practices, your agent workflows become both reliable and extendable.
docs/docs5.txt
ADDED
@@ -0,0 +1,22 @@
RAG Evaluation – Best Practices for Retrieval-Augmented Generation Systems

Evaluation is a critical step when building a Retrieval-Augmented Generation (RAG) system. A successful RAG system must not only retrieve relevant documents but also generate accurate, grounded responses based on that context. Without proper evaluation, errors in retrieval or generation can slip into production, causing misleading answers or user frustration.

First, treat the retrieval and generation components separately. For retrieval, measure how well the system finds useful documents: metrics like precision@k (how many of the top k retrieved are actually relevant) and recall@k (how many relevant documents were retrieved) help you locate weaknesses in your vector store or embedding model. For generation, assess whether the answer is correct, relevant, coherent and faithful to the retrieved context. If your agent produces fluent text but it’s not grounded in the retrieved material, you’ll face trust issues.
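A minimal sketch of those two metrics, assuming you have gold labels for which retrieved items are relevant:

def precision_at_k(retrieved_ids, relevant_ids, k):
    # Fraction of the top-k results that are actually relevant.
    top_k = retrieved_ids[:k]
    return sum(1 for r in top_k if r in relevant_ids) / k

def recall_at_k(retrieved_ids, relevant_ids, k):
    # Fraction of all relevant items that appear in the top-k results.
    top_k = retrieved_ids[:k]
    return sum(1 for r in top_k if r in relevant_ids) / len(relevant_ids)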

Second, build a structured test set early. Select a variety of realistic questions that reflect how users will use the system. For each, define expected outcomes or “gold” answers when possible. By using the same test set across iterations, you can compare performance when you change chunking methods, vector stores, or prompts. This consistency ensures that improvements are measurable and meaningful.

Third, automate the evaluation process. Set up scripts or pipelines that run the test set, compute metrics, record results, and plot trends. This way you can track regressions, monitor when performance drops (for example, if the knowledge base changes), and set thresholds for when to alert for human review. Continuous monitoring is especially important as your document base grows or becomes dynamic.
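A sketch of such a harness against this repo's retriever, assuming a hand-built test set mapping questions to the source files known to be relevant (the gold data below is hypothetical):

from retriever import retrieve_query

# Hypothetical gold data for this repo's docs folder.
test_set = {
    "how do you evaluate a RAG system?": {"docs5.txt"},
    "what are agent workflow best practices?": {"doc3.txt", "doc4.txt"},
}

def evaluate(collection, k=5):
    for question, relevant_sources in test_set.items():
        retrieved = [r["source"] for r in retrieve_query(question, collection, top_k=k)]
        hits = sum(1 for s in retrieved if s in relevant_sources)
        print(f"{question} -> precision@{k} = {hits / k:.2f}")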

Finally, remember that evaluation is ongoing—once you deploy your agent, user behaviour will evolve, documents will change, and queries will shift. Plan periodic re-evaluation (e.g., monthly or after major updates), refresh test sets, and maintain logs of system decisions. By doing so, you ensure your RAG assistant stays reliable and effective over time.
retriever.py
ADDED
@@ -0,0 +1,121 @@
import os
from typing import List, Dict, Any
from sentence_transformers import SentenceTransformer
import chromadb

# Resolve the docs folder relative to this file instead of a hard-coded absolute path.
FOLDER_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "docs")

model = SentenceTransformer("all-MiniLM-L6-v2")
chroma_client = chromadb.Client()

def load_document(docs_path: str = FOLDER_PATH) -> List[Dict[str, Any]]:
    """
    Load text documents from docs_path.
    """
    documents = []
    for doc in os.listdir(docs_path):
        filepath = os.path.join(docs_path, doc)
        if not os.path.isfile(filepath):  # skip subdirectories and other non-files
            continue
        with open(filepath, 'r', encoding='utf-8') as file:
            content = file.read()
        documents.append({
            "filename": doc,
            "content": content
        })
    return documents


def chunk_text(content: str, max_tokens: int = 250, overlap_tokens: int = 50) -> List[str]:
    """
    Clean, preprocess and split a long text into chunks of up to max_tokens words,
    with an overlap of overlap_tokens between consecutive chunks to preserve context.
    """
    content = content.strip()
    content = " ".join(content.split())
    content = content.lower()

    words = content.split()
    chunks = []
    start = 0
    length = len(words)

    while start < length:
        end = min(start + max_tokens, length)
        chunk = " ".join(words[start:end])
        chunks.append(chunk)

        start += (max_tokens - overlap_tokens)

    return chunks


def embed_text(chunks: List[str], batch_size: int = 32) -> List[List[float]]:
    """
    Embed text chunks using the pre-loaded SentenceTransformer model.
    Returns a list of embeddings.
    """
    embeddings = model.encode(chunks, batch_size=batch_size, convert_to_tensor=False, show_progress_bar=True)
    return embeddings.tolist()  # convert the numpy array to plain lists, matching the annotation


def vector_store(collection, ids: list, documents: list, metadatas: list, embeddings: list = None):
    """
    Add documents to a ChromaDB collection, creating the default collection
    when None is passed instead of ignoring the argument.
    """
    if collection is None:
        collection = chroma_client.get_or_create_collection(name="document_assistant_collection")
    collection.add(
        ids=ids,
        documents=documents,
        metadatas=metadatas,
        embeddings=embeddings,
    )
    return collection


def retrieve_query(query: str, collection, top_k: int = 5) -> List[Dict[str, Any]]:
    """
    Retrieve the top_k most relevant document chunks for the given query from the collection.
    """
    clean_q = query.strip().lower()
    q_emb = model.encode([clean_q], convert_to_tensor=False)[0]
    results = collection.query(
        query_embeddings=[q_emb],
        n_results=top_k,
        include=["documents", "metadatas", "distances"]
    )
    retrieved = []
    docs_list = results["documents"][0]
    metas_list = results["metadatas"][0]
    dists_list = results["distances"][0]
    for text, meta, dist in zip(docs_list, metas_list, dists_list):
        retrieved.append({
            "source": meta.get("source"),
            "text": text,
            "score": dist
        })
    return retrieved

if __name__ == "__main__":
    # Get or create the collection
    collection = chroma_client.get_or_create_collection(name="document_assistant_collection")

    # Load and prepare documents
    docs = load_document()
    ids, chunks, metas = [], [], []
    for d in docs:
        for idx, c in enumerate(chunk_text(d["content"], max_tokens=250, overlap_tokens=50)):
            ids.append(f"{d['filename']}_chunk{idx}")
            chunks.append(c)
            metas.append({"source": d["filename"], "chunk_index": idx})

    # Embed chunks and add to vector store
    embeddings = embed_text(chunks, batch_size=32)
    coll = vector_store(collection, ids=ids, documents=chunks, metadatas=metas, embeddings=embeddings)

    # Interactive question loop
    # while True:
    #     q = input("Your question (or 'exit'): ")
    #     if q.lower() in ("exit", "quit"):
    #         break
    #     results = retrieve_query(q, coll, top_k=5)
    #     for r in results:
    #         print(f"Source: {r['source']} | Score: {r['score']}\nText: {r['text']}\n---")
tools.py
ADDED
@@ -0,0 +1,109 @@
from smolagents import Tool
from typing import Dict, Any, List
from retriever import retrieve_query
import io
from contextlib import redirect_stdout

class DocumentRetrievalTool(Tool):
    """
    A tool for performing semantic search across a document knowledge base stored in ChromaDB.
    It accepts a query and retrieves the most relevant document chunks.
    """
    name = "document_retrieval"
    description = (
        "Use this tool to search for specific, relevant information within the loaded document set. "
        "Always use this when the user's question relates to the content of the documents."
    )
    inputs = {
        "query": {
            "type": "string",
            "description": "The search query, which must be a specific question or topic related to the document's content."
        }
    }
    output_type = "string"

    def __init__(self, collection: Any):
        self.collection = collection
        super().__init__()

    def forward(self, query: str) -> str:
        """
        Performs a query using the custom retrieval function and returns the results
        formatted for the agent.
        """
        retrieved_results: List[Dict[str, Any]] = retrieve_query(query, self.collection, top_k=5)
        context_parts = []
        for result in retrieved_results:
            context_parts.append(
                f"Source ({result['source']}): {result['text']}"
            )
        context = "\n---\n".join(context_parts)

        return (
            f"Retrieved context from documents:\n\n{context}\n\n"
        )

class DocumentSummarizationTool(Tool):
    """
    A tool that summarizes a given document text using a pre-trained summarization model.
    """
    name = "document_summarization"
    description = (
        "Use this tool to summarize a loaded document set."
    )
    inputs = {
        "document_text": {
            "type": "string",
            "description": "The document text to be summarized."
        }
    }
    output_type = "string"

    def __init__(self, summarization_pipeline: Any):
        self.summarization_pipeline = summarization_pipeline
        super().__init__()

    def forward(self, document_text: str) -> str:
        """
        Performs summarization using the provided summarization pipeline.
        """
        # Truncate inputs longer than the model's maximum length instead of erroring.
        summary_output = self.summarization_pipeline(document_text, truncation=True)
        return summary_output[0]["summary_text"]


class CodeExecutionTool(Tool):
    """
    A sandboxed tool for executing Python code (e.g., for calculations,
    data manipulation, or simple logic puzzles that the LLM might struggle with).
    """
    name: str = "python_interpreter"
    description: str = (
        "A Python interpreter used to run short snippets of code for calculations "
        "or logic. The input must be valid Python code. The tool captures and "
        "returns any printed output."
    )
    inputs = {
        "code": {
            "type": "string",
            "description": "The Python code to execute. Must be a single code block."
        }
    }
    output_type = "string"

    def forward(self, code: str) -> str:
        """
        Executes the given Python code in a restricted environment.
        """
        # Expose only a small allow-list of builtins to the executed code.
        safe_builtins = {'print': print, 'len': len, 'sum': sum, 'min': min, 'max': max, 'range': range, 'str': str, 'int': int, 'float': float}
        safe_globals = {'__builtins__': safe_builtins}
        output_buffer = io.StringIO()

        try:
            with redirect_stdout(output_buffer):
                exec(code, safe_globals)

            output = output_buffer.getvalue()
            return f"Code Output:\n{output.strip()}" if output else "No output. Did you forget print()?"

        except Exception as e:
            return f"Code Execution Error: {type(e).__name__}: {str(e)}"