File size: 6,874 Bytes
f791164
 
 
0118ff8
 
 
 
 
 
f9bd7be
dbb07de
0118ff8
 
d0210fc
0118ff8
 
 
 
 
 
f9bd7be
 
0118ff8
dbb07de
f8efad0
 
 
 
50351e8
 
 
f9bd7be
 
0118ff8
 
 
dbb07de
0118ff8
 
 
 
 
 
 
 
f9bd7be
 
0118ff8
dbb07de
0118ff8
 
 
 
f9bd7be
 
dbb07de
0118ff8
f9bd7be
 
0118ff8
dbb07de
0118ff8
 
 
 
f9bd7be
0118ff8
dbb07de
0118ff8
f9bd7be
 
0118ff8
dbb07de
0118ff8
 
 
f9bd7be
 
0118ff8
dbb07de
0118ff8
f9bd7be
 
0118ff8
dbb07de
0118ff8
 
f9bd7be
f791164
 
 
 
dbb07de
f791164
 
 
 
 
f8efad0
 
 
50351e8
f791164
 
 
 
 
 
 
 
50351e8
 
 
 
 
0118ff8
f9bd7be
 
 
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
0118ff8
 
f9bd7be
 
 
 
 
dbb07de
f791164
 
f9bd7be
 
 
 
 
dbb07de
f9bd7be
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198

import re
from markdownify import markdownify
import requests
import io
import pandas as pd
from PIL import Image
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
from huggingface_hub import InferenceClient
from llama_index.core.agent.workflow import ReActAgent
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

# Hugging Face inference client used by the captioning and ASR tools below.
client = InferenceClient(
  provider="hf-inference",
)

# Base URL of the agents-course scoring/file-serving API (unit 4).
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# DuckDuckGo tool spec backing search_tool below.
search_tool_spec = DuckDuckGoSearchToolSpec()

# Searching tools

def search_tool(query: str) -> str:
    """Browse the web using DuckDuckGo.

    Args:
        query: Free-text search query.

    Returns:
        A newline-separated, numbered list of result snippets, or an
        explicit "No results found." message when the search is empty.
    """
    print(f"Calling search_tool with query: {query}")
    result = search_tool_spec.duckduckgo_full_search(query=query)

    print(f"Search results length: {len(result)}")
    print(f"First search result: {result[0] if result else 'No results found'}")
    if not result:
        # Robustness fix: the original returned "" for an empty search,
        # which gives the calling agent nothing to reason about.
        return "No results found."
    # NOTE(review): assumes each result exposes a `.body` attribute, as the
    # original code did — confirm against the installed tool-spec version.
    result_str = "\n".join([f"{i+1}. {item.body}" for i, item in enumerate(result)])
    return result_str

def fetch_file_bytes(task_id: str) -> bytes | None:
    """
    Fetch a file from the given task ID.

    Args:
        task_id: Identifier appended to DEFAULT_API_URL to build the
            download URL.

    Returns:
        The raw file content as bytes, or None when the request fails.

    Note: the original annotation said ``str | None``, but
    ``response.content`` is bytes — annotation corrected.
    """
    print(f"Calling fetch_file_bytes with task_id: {task_id}")
    try:
        response = requests.get(f"{DEFAULT_API_URL}/files/{task_id}", timeout=15)
        response.raise_for_status()
        print(f"File {task_id} fetched successfully.")
        return response.content
    except requests.exceptions.RequestException as e:
        # Best-effort: log and signal failure with None rather than raising,
        # so the agent workflow can continue.
        print(f"Error fetching file {task_id}: {e}")
        return None

def bytes_to_image(image_bytes: bytes) -> Image.Image:
    """Decode raw bytes into a PIL image.

    Side effect: writes the decoded image to "temp_image.png" in the
    working directory.

    Args:
        image_bytes: Encoded image data in any format PIL can open.

    Returns:
        The opened PIL image object.

    Note: the original annotated the return as ``Image`` (the module) and
    its docstring claimed an "image URL" — both corrected.
    """
    print("Calling bytes_to_image")
    file = Image.open(io.BytesIO(image_bytes))
    file.save("temp_image.png")
    return file

def document_bytes_to_text(doc_bytes: bytes) -> str:
    """Decode raw document bytes into a UTF-8 string."""
    print("Calling document_bytes_to_text")
    text = doc_bytes.decode("utf-8")
    return text

def xlsx_to_text(file_bytes: bytes) -> str:
    """Render the first sheet of an XLSX workbook as plain text via pandas."""
    print("Calling xlsx_to_text")
    buffer = io.BytesIO(file_bytes)
    frame = pd.read_excel(buffer, engine='openpyxl')
    text = frame.to_string(index=False)
    return text

def extract_text_from_image(image_url: bytes) -> str:
    """Generate a caption for an image using a BLIP captioning model.

    NOTE(review): the original docstring said "Tesseract", but the call
    actually uses Salesforce/blip-image-captioning-base — an image
    *captioning* model, not OCR — so the output is a description, not
    extracted text. The ``image_url`` parameter is annotated ``bytes``;
    presumably the raw image bytes from fetch_file_bytes — confirm
    against callers and the installed huggingface_hub version.

    Args:
        image_url: Image content forwarded to the inference client.

    Returns:
        The generated caption text.
    """
    print("Calling extract_text_from_image")
    return client.image_to_text(image_url=image_url, task="image-to-text", model="Salesforce/blip-image-captioning-base").generated_text

def extract_text_from_csv(file_bytes: bytes) -> str:
    """Extract text from a CSV file."""
    print("Calling extract_text_from_csv")
    io_bytes = io.BytesIO(file_bytes)
    df = pd.read_csv(io_bytes)
    return df.to_string(index=False)

def extract_text_from_code_file(file_bytes: bytes) -> str:
    """Decode a source-code file's bytes into UTF-8 text.

    Args:
        file_bytes: Raw bytes of the code file.

    Returns:
        The decoded text.

    Note: the original parameter was named ``bytes``, shadowing the
    builtin; renamed to ``file_bytes`` to match the sibling helpers
    (positional callers are unaffected).
    """
    print("Calling extract_text_from_code_file")
    return file_bytes.decode("utf-8")

def extract_text_from_audio_file(file_bytes: bytes) -> str:
    """Transcribe audio bytes to text with the Whisper ASR model."""
    print("Calling extract_text_from_audio_file")
    transcription = client.automatic_speech_recognition(file_bytes, model="openai/whisper-large-v2")
    return transcription.text

def webpage_to_markdown(url: str) -> str:
    """
    Access a web page and return its content as markdown.
    Limits output to 10,000 characters to avoid excessive responses.

    Args:
        url: Fully-qualified URL of the page to fetch.

    Returns:
        The page converted to markdown (capped at 10,000 characters),
        or a human-readable error message string on failure.
    """
    print(f"Calling webpage_to_markdown with url: {url}")
    try:
        response = requests.get(url, timeout=20)
        response.raise_for_status()
        markdown_content = markdownify(response.text).strip()
        # Collapse runs of 3+ newlines so the markdown stays compact.
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)

        print(f"Markdown initial content: {markdown_content[:500]}...")

        # Bug fix: the docstring promised a 10,000-character cap, but the
        # original returned the full document; enforce the cap here.
        return markdown_content[:10000]
    except requests.exceptions.Timeout:
        return "Request timed out. Please try again later or check the URL."
    except requests.exceptions.RequestException as e:
        return f"Error fetching the webpage: {str(e)}"
    except Exception as e:
        return f"Unexpected error: {str(e)}"


# Shared LLM backend for every agent declared below, served through the
# Hugging Face Inference API. Both token caps bound generation length.
llm = HuggingFaceInferenceAPI(
  model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
  max_tokens=5000,
  max_new_tokens=5000,
)
# Initialize tools
# --- ReActAgent and AgentWorkflow tool declaration ---
# One single-tool ReActAgent per helper function above; all share `llm`.
# Each agent's description tells an orchestrating workflow when to route to it.

search_agent = ReActAgent(
    name="search_agent",
    description="Searches the web using DuckDuckGo.",
    system_prompt="A helpful assistant that can search the web using DuckDuckGo.",
    tools=[search_tool],
    llm=llm,
)

fetch_file_agent = ReActAgent(
    name="fetch_file_agent",
    description="Fetches a file from a given task ID.",
    system_prompt="A helpful assistant that can fetch files by task ID.",
    tools=[fetch_file_bytes],
    llm=llm,
)

bytes_to_image_agent = ReActAgent(
    name="bytes_to_image_agent",
    description="Converts bytes to an image.",
    system_prompt="A helpful assistant that can convert bytes to an image.",
    tools=[bytes_to_image],
    llm=llm,
)

document_bytes_to_text_agent = ReActAgent(
    name="document_bytes_to_text_agent",
    description="Converts document bytes to text.",
    system_prompt="A helpful assistant that can convert document bytes to text.",
    tools=[document_bytes_to_text],
    llm=llm,
)

xlsx_to_text_agent = ReActAgent(
    name="xlsx_to_text_agent",
    description="Converts XLSX file bytes to text.",
    system_prompt="A helpful assistant that can convert XLSX file bytes to text.",
    tools=[xlsx_to_text],
    llm=llm,
)

# NOTE(review): despite the "Tesseract" wording, the underlying tool calls a
# BLIP image-captioning model — it describes images rather than performing OCR.
extract_text_from_image_agent = ReActAgent(
    name="extract_text_from_image_agent",
    description="Extracts text from an image using Tesseract.",
    system_prompt="A helpful assistant that can extract text from images.",
    tools=[extract_text_from_image],
    llm=llm,
)

extract_text_from_csv_agent = ReActAgent(
    name="extract_text_from_csv_agent",
    description="Extracts text from a CSV file.",
    system_prompt="A helpful assistant that can extract text from CSV files.",
    tools=[extract_text_from_csv],
    llm=llm,
)

extract_text_from_code_file_agent = ReActAgent(
    name="extract_text_from_code_file_agent",
    description="Extracts text from a code file.",
    system_prompt="A helpful assistant that can extract text from code files.",
    tools=[extract_text_from_code_file],
    llm=llm,
)

extract_text_from_audio_file_agent = ReActAgent(
    name="extract_text_from_audio_file_agent",
    description="Extracts text from an audio file.",
    system_prompt="A helpful assistant that can extract text from audio files.",
    tools=[extract_text_from_audio_file],
    llm=llm,
)

webpage_to_markdown_agent = ReActAgent(
    name="webpage_to_markdown_agent",
    description="Accesses a web page by URL and returns the content as markdown.",
    system_prompt="A helpful assistant that can access web pages and return markdown.",
    tools=[webpage_to_markdown],
    llm=llm,
)