aelin committed on
Commit
0118ff8
·
1 Parent(s): 81917a3

Initial commit

Browse files
__pycache__/_tools.cpython-311.pyc ADDED
Binary file (5.82 kB). View file
 
__pycache__/_types.cpython-311.pyc ADDED
Binary file (689 Bytes). View file
 
_tools.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import io
3
+
4
+ import pandas as pd
5
+ from PIL import Image
6
+ from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
7
+ from llama_index.core.tools import FunctionTool
8
+ from huggingface_hub import InferenceClient
9
+
10
+ client = InferenceClient(
11
+ provider="hf-inference",
12
+ )
13
+
14
+
15
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
16
+ search_tool_spec = DuckDuckGoSearchToolSpec()
17
+
18
+ # Searching tools
19
def _search_tool(query: str) -> str:
    """Run a DuckDuckGo full-text web search for *query* and return the raw results."""
    # Log the outgoing query before dispatching it to the search backend.
    print(f"🔍 Executando busca no DuckDuckGo para: {query}")
    results = search_tool_spec.duckduckgo_full_search(query=query)
    return results
24
+
25
def _fetch_file_bytes(task_id: str) -> bytes | None:
    """Fetch the raw bytes of the file attached to a task.

    Args:
        task_id: Identifier of the task whose file should be downloaded.

    Returns:
        The response body as raw ``bytes`` on success, or ``None`` when the
        request fails (network error, timeout, or non-2xx status).
        (Fixed: the original annotated ``str | None``, but ``response.content``
        is bytes.)
    """
    try:
        response = requests.get(f"{DEFAULT_API_URL}/files/{task_id}", timeout=15)
        response.raise_for_status()

        print(f"File {task_id} fetched successfully.")
        return response.content

    except requests.exceptions.RequestException as e:
        print(f"Error fetching file {task_id}: {e}")
        return None
40
+
41
+ # Parsing tools
42
def _bytes_to_image(image_bytes: bytes) -> Image.Image:
    """Decode raw image bytes into a PIL image.

    The decoded image is also written to ``temp_image.png`` in the working
    directory as a side effect (kept for downstream tools that expect a file
    on disk).

    Args:
        image_bytes: Encoded image data (PNG, JPEG, ...).

    Returns:
        The decoded ``PIL.Image.Image`` object.
        (Fixed: the return annotation was the ``Image`` module, not the
        ``Image.Image`` class, and the docstring wrongly said "image URL".)
    """
    file = Image.open(io.BytesIO(image_bytes))

    file.save("temp_image.png")

    return file
50
+
51
+ def _document_bytes_to_text(doc_bytes: bytes) -> str:
52
+ """Convert document bytes to text."""
53
+ return doc_bytes.decode("utf-8")
54
+
55
def _xlsx_to_text(file_bytes: bytes) -> str:
    """Render the first sheet of an XLSX workbook as a plain-text table."""
    workbook_buffer = io.BytesIO(file_bytes)
    frame = pd.read_excel(workbook_buffer, engine='openpyxl')
    return frame.to_string(index=False)
61
+
62
+ # Extracting text tools
63
def _extract_text_from_image(image_url: bytes) -> str:
    """Describe an image via the Hugging Face image-to-text endpoint.

    NOTE(review): despite the original docstring, this does not run Tesseract
    OCR — it calls the BLIP captioning model, so the result is a natural-
    language caption, not the literal text in the image.

    Args:
        image_url: Raw image bytes. The parameter name is kept for backward
            compatibility with existing tool registrations, even though a
            bytes payload (not a URL) is what call sites pass — confirm
            against the ``InferenceClient.image_to_text`` API.

    Returns:
        The generated caption text.
    """
    return client.image_to_text(
        image_url=image_url,
        task="image-to-text",
        model="Salesforce/blip-image-captioning-base",
    ).generated_text
66
+
67
+ def _extract_text_from_csv(file_bytes: bytes) -> str:
68
+ """Extract text from a CSV file."""
69
+ io_bytes = io.BytesIO(file_bytes)
70
+ df = pd.read_csv(io_bytes)
71
+
72
+ return df.to_string(index=False)
73
+
74
+ def _extract_text_from_code_file(bytes: bytes) -> str:
75
+ """Extract text from a code file."""
76
+ return bytes.decode("utf-8")
77
+
78
def _extract_text_from_audio_file(file_bytes: bytes) -> str:
    """Transcribe audio bytes with Whisper via the HF inference client."""
    transcription = client.automatic_speech_recognition(
        file_bytes, model="openai/whisper-large-v2"
    )
    return transcription.text
81
+
82
# Initialize tools
# Each private helper above is wrapped in a FunctionTool so the agent
# workflow can invoke it by name.
# NOTE(review): these tool names contain spaces and mixed case; some
# function-calling backends restrict tool names to [A-Za-z0-9_-] — confirm
# the target LLM API accepts them.
search_tool = FunctionTool.from_defaults(
    _search_tool,
    name="DuckDuckGo Search",
    description="Search the web using DuckDuckGo."
)

# Downloads the raw bytes of a task's attached file from the scoring API.
fetch_file_bytes_tool = FunctionTool.from_defaults(
    _fetch_file_bytes,
    name="Fetch File Bytes",
    description="Fetch a file from the given task ID."
)

# Decodes image bytes into a PIL image (also writes temp_image.png to disk).
bytes_to_image_tool = FunctionTool.from_defaults(
    _bytes_to_image,
    name="Bytes to Image",
    description="Convert bytes to image URL."
)

# UTF-8 decodes plain document bytes.
document_bytes_to_text_tool = FunctionTool.from_defaults(
    _document_bytes_to_text,
    name="Document Bytes to Text",
    description="Convert bytes to document text, i.e., .txt, .pdf, etc."
)

# Renders an XLSX workbook as a text table via pandas.
xlsx_to_text_tool = FunctionTool.from_defaults(
    _xlsx_to_text,
    name="XLSX to Text",
    description="Convert XLSX file bytes to text."
)

# NOTE(review): despite this description, the wrapped helper calls a BLIP
# captioning model, not Tesseract OCR — it returns a caption, not OCR text.
extract_text_from_image_tool = FunctionTool.from_defaults(
    _extract_text_from_image,
    name="Extract Text from Image",
    description="Extract text from an image using Tesseract."
)

# Renders CSV bytes as a text table via pandas.
extract_text_from_csv_tool = FunctionTool.from_defaults(
    _extract_text_from_csv,
    name="Extract Text from CSV",
    description="Extract text from a CSV file."
)

# UTF-8 decodes source-code bytes.
extract_text_from_code_file_tool = FunctionTool.from_defaults(
    _extract_text_from_code_file,
    name="Extract Text from Code File",
    description="Extract text from a code file, i.e., .py, .js, .java, etc."
)

# Transcribes audio bytes with Whisper via the HF inference client.
extract_text_from_audio_file_tool = FunctionTool.from_defaults(
    _extract_text_from_audio_file,
    name="Extract Text from Audio File",
    description="Extract text from an audio file."
)

# The aggregate toolset handed to the agent workflow in app.py.
tools = [
    search_tool,
    fetch_file_bytes_tool,
    bytes_to_image_tool,
    document_bytes_to_text_tool,
    extract_text_from_image_tool,
    extract_text_from_csv_tool,
    extract_text_from_code_file_tool,
    extract_text_from_audio_file_tool,
    xlsx_to_text_tool,
]
148
+
_types.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypedDict, Optional, List
2
+
3
class _QuestionBase(TypedDict):
    """Required fields of a scoring-API question record."""
    task_id: str
    question: str


class Question(_QuestionBase, total=False):
    """A question returned by the scoring API.

    ``file_name`` is optional. TypedDict fields cannot take ``= None``
    defaults — the original ``file_name: Optional[str] = None`` raises a
    TypeError when the class is created — so optionality is expressed with a
    ``total=False`` subclass instead.
    """
    file_name: Optional[str]


# A list of questions based on Question type
Questions = List[Question]
app.py CHANGED
@@ -1,8 +1,15 @@
1
  import os
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
 
 
 
 
 
 
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
@@ -13,95 +20,143 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
  class BasicAgent:
14
  def __init__(self):
15
  print("BasicAgent initialized.")
16
- def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
21
 
22
- def run_and_submit_all( profile: gr.OAuthProfile | None):
23
- """
24
- Fetches all questions, runs the BasicAgent on them, submits all answers,
25
- and displays the results.
26
- """
27
- # --- Determine HF Space Runtime URL and Repo URL ---
28
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
29
 
30
- if profile:
31
- username= f"{profile.username}"
32
- print(f"User logged in: {username}")
33
- else:
34
- print("User not logged in.")
35
- return "Please Login to Hugging Face with the button.", None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- api_url = DEFAULT_API_URL
38
- questions_url = f"{api_url}/questions"
39
- submit_url = f"{api_url}/submit"
40
 
41
- # 1. Instantiate Agent ( modify this part to create your agent)
 
 
 
 
42
  try:
43
  agent = BasicAgent()
 
 
 
44
  except Exception as e:
45
  print(f"Error instantiating agent: {e}")
46
- return f"Error initializing agent: {e}", None
47
- # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
48
- agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
49
- print(agent_code)
50
 
51
- # 2. Fetch Questions
52
  print(f"Fetching questions from: {questions_url}")
 
53
  try:
54
  response = requests.get(questions_url, timeout=15)
55
  response.raise_for_status()
56
  questions_data = response.json()
57
- if not questions_data:
58
- print("Fetched questions list is empty.")
59
- return "Fetched questions list is empty or invalid format.", None
 
 
 
60
  print(f"Fetched {len(questions_data)} questions.")
 
 
 
61
  except requests.exceptions.RequestException as e:
62
  print(f"Error fetching questions: {e}")
63
- return f"Error fetching questions: {e}", None
 
 
64
  except requests.exceptions.JSONDecodeError as e:
65
- print(f"Error decoding JSON response from questions endpoint: {e}")
66
- print(f"Response text: {response.text[:500]}")
67
- return f"Error decoding server response for questions: {e}", None
 
 
68
  except Exception as e:
69
  print(f"An unexpected error occurred fetching questions: {e}")
70
- return f"An unexpected error occurred fetching questions: {e}", None
 
71
 
72
- # 3. Run your Agent
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  results_log = []
74
  answers_payload = []
 
75
  print(f"Running agent on {len(questions_data)} questions...")
 
76
  for item in questions_data:
77
  task_id = item.get("task_id")
78
  question_text = item.get("question")
 
79
  if not task_id or question_text is None:
80
  print(f"Skipping item with missing task_id or question: {item}")
81
  continue
 
82
  try:
83
- submitted_answer = agent(question_text)
84
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
86
  except Exception as e:
87
- print(f"Error running agent on task {task_id}: {e}")
88
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
89
-
90
- if not answers_payload:
91
- print("Agent did not produce any answers to submit.")
92
- return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
93
 
94
- # 4. Prepare Submission
95
- submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
96
- status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
97
- print(status_update)
98
-
99
- # 5. Submit
100
- print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
101
  try:
102
  response = requests.post(submit_url, json=submission_data, timeout=60)
103
  response.raise_for_status()
104
  result_data = response.json()
 
105
  final_status = (
106
  f"Submission Successful!\n"
107
  f"User: {result_data.get('username')}\n"
@@ -109,8 +164,10 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
109
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
110
  f"Message: {result_data.get('message', 'No message received.')}"
111
  )
 
112
  print("Submission successful.")
113
  results_df = pd.DataFrame(results_log)
 
114
  return final_status, results_df
115
  except requests.exceptions.HTTPError as e:
116
  error_detail = f"Server responded with status {e.response.status_code}."
@@ -119,26 +176,84 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
119
  error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
120
  except requests.exceptions.JSONDecodeError:
121
  error_detail += f" Response: {e.response.text[:500]}"
 
122
  status_message = f"Submission Failed: {error_detail}"
123
  print(status_message)
124
  results_df = pd.DataFrame(results_log)
 
125
  return status_message, results_df
 
126
  except requests.exceptions.Timeout:
127
  status_message = "Submission Failed: The request timed out."
128
  print(status_message)
129
  results_df = pd.DataFrame(results_log)
 
130
  return status_message, results_df
 
131
  except requests.exceptions.RequestException as e:
132
  status_message = f"Submission Failed: Network error - {e}"
133
  print(status_message)
134
  results_df = pd.DataFrame(results_log)
 
135
  return status_message, results_df
 
136
  except Exception as e:
137
  status_message = f"An unexpected error occurred during submission: {e}"
138
  print(status_message)
139
  results_df = pd.DataFrame(results_log)
 
140
  return status_message, results_df
141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
  # --- Build Gradio Interface using Blocks ---
144
  with gr.Blocks() as demo:
 
1
  import os
2
  import gradio as gr
3
  import requests
 
4
  import pandas as pd
5
+ from _types import Questions, Question
6
+ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
7
+ from llama_index.core.agent.workflow import AgentWorkflow
8
+ from _tools import tools
9
+ import asyncio
10
+ from huggingface_hub import login
11
+
12
+ login()
13
 
14
  # (Keep Constants as is)
15
  # --- Constants ---
 
20
class BasicAgent:
    """Thin wrapper around a llama-index AgentWorkflow built from the project tools."""

    def __init__(self):
        print("BasicAgent initialized.")

        # Remote LLM served through the HF Inference API.
        llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

        agent = AgentWorkflow.from_tools_or_functions(
            tools,
            llm=llm,
            verbose=True
        )

        self.agent = agent

    async def run(self, question: Question) -> str:
        """
        Run the agent with the provided question and return the answer.
        """
        # (Fixed: this docstring used to sit after the first three statements,
        # where it was a dead string expression rather than documentation.)
        question_text = question["question"]
        task_id = question["task_id"]
        file_name = question.get("file_name")

        print(f"Agent received question (first 50 chars): {question_text[:50]}...")

        prompt = f"""
        You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template:
        YOUR ANSWER.

        YOUR ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
        If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
        If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless
        specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in
        the list is a number or a string.\n

        The question is: {question_text}\n

        If the question has a file, the file name is the task ID: {task_id}. You can use it to fetch the bytes of the file and parse
        as you want. The file name is: {file_name}.\n
        """

        answer = await self.agent.run(prompt)

        print(f"Agent returning answer: {answer}")

        # NOTE(review): AgentWorkflow.run returns a workflow result object, not
        # necessarily a plain str as annotated — confirm callers can serialize it.
        return answer
65
+
66
def instantiate_agent():
    """Build a BasicAgent; return (agent, error_message) with exactly one set."""
    try:
        new_agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return None, f"Error initializing agent: {e}"
    return new_agent, None
 
 
76
 
77
def fetch_questions(questions_url):
    """Fetch the question list from the scoring API.

    Args:
        questions_url: Full URL of the `/questions` endpoint.

    Returns:
        (questions_data, None) on success, or (None, error_message) when the
        request fails, the payload is empty, or the response is not JSON.
    """
    print(f"Fetching questions from: {questions_url}")

    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()

        if not questions_data:
            print("Fetched questions list is empty.")

            return None, "Fetched questions list is empty or invalid format."

        print(f"Fetched {len(questions_data)} questions.")

        return questions_data, None

    # JSONDecodeError subclasses RequestException in requests, so it must be
    # caught FIRST — the original handler order made this branch unreachable.
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")

        return None, f"Error decoding server response for questions: {e}"

    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")

        return None, f"Error fetching questions: {e}"

    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")

        return None, f"An unexpected error occurred fetching questions: {e}"
109
 
110
async def fetch_file(question: Question) -> bytes | None:
    """
    Fetch the file attached to *question* from the scoring API.

    Returns the raw response body as bytes, or None when the request fails.
    (Fixed: the original annotated ``str | None``; ``response.content`` is bytes.)

    NOTE(review): this coroutine performs a blocking ``requests.get``, which
    stalls the event loop while downloading — consider an async HTTP client
    if this runs concurrently with agent work.
    """
    file_url = f"{DEFAULT_API_URL}/files"

    try:
        response = requests.get(f"{file_url}/{question['task_id']}", timeout=15)
        response.raise_for_status()

        print(f"File {question['task_id']} fetched successfully.")
        return response.content

    except requests.exceptions.RequestException as e:
        print(f"Error fetching file {question['task_id']}: {e}")
        return None
126
+
127
async def run_agent_on_questions(agent: BasicAgent, questions_data: Questions):
    """Run *agent* over every fetched question.

    Returns (answers_payload, results_log): the submission payload plus a
    per-question log, with AGENT ERROR rows recorded for failures.
    """
    answers_payload = []
    results_log = []

    print(f"Running agent on {len(questions_data)} questions...")

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")

        # Malformed records are logged and skipped rather than submitted.
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue

        try:
            submitted_answer = await agent.run(item)
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
            continue

        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

    return answers_payload, results_log
 
 
151
 
152
+ def submit_answers(submit_url, submission_data, results_log):
153
+ print(f"Submitting {len(submission_data['answers'])} answers to: {submit_url}")
154
+
 
 
 
 
155
  try:
156
  response = requests.post(submit_url, json=submission_data, timeout=60)
157
  response.raise_for_status()
158
  result_data = response.json()
159
+
160
  final_status = (
161
  f"Submission Successful!\n"
162
  f"User: {result_data.get('username')}\n"
 
164
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
165
  f"Message: {result_data.get('message', 'No message received.')}"
166
  )
167
+
168
  print("Submission successful.")
169
  results_df = pd.DataFrame(results_log)
170
+
171
  return final_status, results_df
172
  except requests.exceptions.HTTPError as e:
173
  error_detail = f"Server responded with status {e.response.status_code}."
 
176
  error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
177
  except requests.exceptions.JSONDecodeError:
178
  error_detail += f" Response: {e.response.text[:500]}"
179
+
180
  status_message = f"Submission Failed: {error_detail}"
181
  print(status_message)
182
  results_df = pd.DataFrame(results_log)
183
+
184
  return status_message, results_df
185
+
186
  except requests.exceptions.Timeout:
187
  status_message = "Submission Failed: The request timed out."
188
  print(status_message)
189
  results_df = pd.DataFrame(results_log)
190
+
191
  return status_message, results_df
192
+
193
  except requests.exceptions.RequestException as e:
194
  status_message = f"Submission Failed: Network error - {e}"
195
  print(status_message)
196
  results_df = pd.DataFrame(results_log)
197
+
198
  return status_message, results_df
199
+
200
  except Exception as e:
201
  status_message = f"An unexpected error occurred during submission: {e}"
202
  print(status_message)
203
  results_df = pd.DataFrame(results_log)
204
+
205
  return status_message, results_df
206
 
207
async def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    space_id = os.getenv("SPACE_ID")

    # Submission requires an authenticated HF user.
    if not profile:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    username = f"{profile.username}"
    print(f"User logged in: {username}")

    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"

    agent, agent_error = instantiate_agent()
    if agent_error:
        return agent_error, None

    # Public link to this Space's code, sent along with the submission.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    questions_data, questions_error = fetch_questions(questions_url)
    if questions_error:
        return questions_error, None

    answers_payload, results_log = await run_agent_on_questions(agent, questions_data)

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    print(f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'...")

    return submit_answers(submit_url, submission_data, results_log)
249
+
250
+
251
+
252
async def main():
    """Module-level entry point: run the full fetch/answer/submit pipeline once."""
    await run_and_submit_all(profile=None)


# asyncio.get_event_loop() is deprecated for creating a loop since Python 3.10
# (and raises when no loop exists in the running thread); asyncio.run() creates
# and cleanly closes a fresh event loop instead.
asyncio.run(main())
257
 
258
  # --- Build Gradio Interface using Blocks ---
259
  with gr.Blocks() as demo:
image.png ADDED
requirements.txt CHANGED
@@ -1,2 +1,196 @@
1
- gradio
2
- requests
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==1.6.0
2
+ aiofiles==24.1.0
3
+ aiohappyeyeballs==2.6.1
4
+ aiohttp==3.11.18
5
+ aiosignal==1.3.2
6
+ annotated-types==0.7.0
7
+ anthropic==0.50.0
8
+ anyio==4.9.0
9
+ asgiref==3.8.1
10
+ asyncio==3.4.3
11
+ attrs==25.3.0
12
+ backoff==2.2.1
13
+ banks==2.1.2
14
+ bcrypt==4.3.0
15
+ beautifulsoup4==4.13.4
16
+ bm25s==0.2.12
17
+ boto3==1.38.8
18
+ botocore==1.38.8
19
+ build==1.2.2.post1
20
+ cachetools==5.5.2
21
+ certifi==2025.4.26
22
+ charset-normalizer==3.4.2
23
+ chroma-hnswlib==0.7.6
24
+ chromadb==1.0.7
25
+ click==8.1.8
26
+ colorama==0.4.6
27
+ coloredlogs==15.0.1
28
+ dataclasses-json==0.6.7
29
+ datasets==3.5.1
30
+ Deprecated==1.2.18
31
+ dill==0.3.8
32
+ dirtyjson==1.0.8
33
+ distro==1.9.0
34
+ duckduckgo_search==6.4.2
35
+ durationpy==0.9
36
+ et_xmlfile==2.0.0
37
+ fastapi==0.115.9
38
+ ffmpy==0.5.0
39
+ filelock==3.18.0
40
+ filetype==1.2.0
41
+ flatbuffers==25.2.10
42
+ frozenlist==1.6.0
43
+ fsspec==2025.3.0
44
+ google-auth==2.39.0
45
+ googleapis-common-protos==1.70.0
46
+ gradio==5.29.0
47
+ gradio_client==1.10.0
48
+ greenlet==3.2.1
49
+ griffe==1.7.3
50
+ groovy==0.1.2
51
+ grpcio==1.71.0
52
+ h11==0.16.0
53
+ httpcore==1.0.9
54
+ httptools==0.6.4
55
+ httpx==0.28.1
56
+ huggingface-hub==0.30.2
57
+ humanfriendly==10.0
58
+ idna==3.10
59
+ importlib_metadata==8.6.1
60
+ importlib_resources==6.5.2
61
+ Jinja2==3.1.6
62
+ jiter==0.9.0
63
+ jmespath==1.0.1
64
+ joblib==1.5.0
65
+ jsonschema==4.23.0
66
+ jsonschema-specifications==2025.4.1
67
+ kubernetes==32.0.1
68
+ llama-cloud==0.1.19
69
+ llama-cloud-services==0.6.21
70
+ llama-index==0.12.34
71
+ llama-index-agent-openai==0.4.7
72
+ llama-index-cli==0.4.1
73
+ llama-index-core==0.12.34.post1
74
+ llama-index-embeddings-huggingface==0.5.3
75
+ llama-index-embeddings-openai==0.3.1
76
+ llama-index-indices-managed-llama-cloud==0.6.11
77
+ llama-index-llms-anthropic==0.6.10
78
+ llama-index-llms-huggingface==0.5.0
79
+ llama-index-llms-huggingface-api==0.4.2
80
+ llama-index-llms-llama-api==0.4.0
81
+ llama-index-llms-openai==0.3.38
82
+ llama-index-llms-openai-like==0.3.4
83
+ llama-index-multi-modal-llms-openai==0.4.3
84
+ llama-index-program-openai==0.3.1
85
+ llama-index-question-gen-openai==0.3.0
86
+ llama-index-readers-file==0.4.7
87
+ llama-index-readers-llama-parse==0.4.0
88
+ llama-index-retrievers-bm25==0.5.2
89
+ llama-index-tools-duckduckgo==0.3.0
90
+ llama-index-vector-stores-chroma==0.4.1
91
+ llama-parse==0.6.21
92
+ markdown-it-py==3.0.0
93
+ markdownify==1.1.0
94
+ MarkupSafe==3.0.2
95
+ marshmallow==3.26.1
96
+ mdurl==0.1.2
97
+ mmh3==5.1.0
98
+ mpmath==1.3.0
99
+ multidict==6.4.3
100
+ multiprocess==0.70.16
101
+ mypy_extensions==1.1.0
102
+ nest-asyncio==1.6.0
103
+ networkx==3.4.2
104
+ nltk==3.9.1
105
+ numpy==2.2.5
106
+ oauthlib==3.2.2
107
+ onnxruntime==1.21.1
108
+ openai==1.77.0
109
+ openpyxl==3.1.5
110
+ opentelemetry-api==1.32.1
111
+ opentelemetry-exporter-otlp-proto-common==1.32.1
112
+ opentelemetry-exporter-otlp-proto-grpc==1.32.1
113
+ opentelemetry-instrumentation==0.53b1
114
+ opentelemetry-instrumentation-asgi==0.53b1
115
+ opentelemetry-instrumentation-fastapi==0.53b1
116
+ opentelemetry-proto==1.32.1
117
+ opentelemetry-sdk==1.32.1
118
+ opentelemetry-semantic-conventions==0.53b1
119
+ opentelemetry-util-http==0.53b1
120
+ orjson==3.10.18
121
+ overrides==7.7.0
122
+ packaging==25.0
123
+ pandas==2.2.3
124
+ pillow==11.2.1
125
+ platformdirs==4.3.7
126
+ posthog==4.0.1
127
+ primp==0.15.0
128
+ propcache==0.3.1
129
+ protobuf==5.29.4
130
+ psutil==7.0.0
131
+ pyarrow==20.0.0
132
+ pyasn1==0.6.1
133
+ pyasn1_modules==0.4.2
134
+ pydantic==2.11.4
135
+ pydantic_core==2.33.2
136
+ pydub==0.25.1
137
+ Pygments==2.19.1
138
+ pypdf==5.4.0
139
+ PyPika==0.48.9
140
+ pyproject_hooks==1.2.0
141
+ pyreadline3==3.5.4
142
+ PyStemmer==2.2.0.3
143
+ pytesseract==0.3.13
144
+ python-dateutil==2.9.0.post0
145
+ python-dotenv==1.1.0
146
+ python-multipart==0.0.20
147
+ pytz==2025.2
148
+ PyYAML==6.0.2
149
+ referencing==0.36.2
150
+ regex==2024.11.6
151
+ requests==2.32.3
152
+ requests-oauthlib==2.0.0
153
+ rich==14.0.0
154
+ rpds-py==0.24.0
155
+ rsa==4.9.1
156
+ ruff==0.11.8
157
+ s3transfer==0.12.0
158
+ safehttpx==0.1.6
159
+ safetensors==0.5.3
160
+ scikit-learn==1.6.1
161
+ scipy==1.15.2
162
+ semantic-version==2.10.0
163
+ sentence-transformers==4.1.0
164
+ shellingham==1.5.4
165
+ six==1.17.0
166
+ smolagents==1.14.0
167
+ sniffio==1.3.1
168
+ soupsieve==2.7
169
+ SpeechRecognition==3.14.2
170
+ SQLAlchemy==2.0.40
171
+ starlette==0.45.3
172
+ striprtf==0.0.26
173
+ sympy==1.14.0
174
+ tenacity==9.1.2
175
+ tesseract==0.1.3
176
+ threadpoolctl==3.6.0
177
+ tiktoken==0.9.0
178
+ tokenizers==0.21.1
179
+ tomlkit==0.13.2
180
+ torch==2.7.0
181
+ tqdm==4.67.1
182
+ transformers==4.51.3
183
+ typer==0.15.3
184
+ typing-inspect==0.9.0
185
+ typing-inspection==0.4.0
186
+ typing_extensions==4.13.2
187
+ tzdata==2025.2
188
+ urllib3==2.4.0
189
+ uvicorn==0.34.2
190
+ watchfiles==1.0.5
191
+ websocket-client==1.8.0
192
+ websockets==15.0.1
193
+ wrapt==1.17.2
194
+ xxhash==3.5.0
195
+ yarl==1.20.0
196
+ zipp==3.21.0
temp_image.png ADDED