Francesco-A committed on
Commit
6cc7969
·
1 Parent(s): ab92d80

FIX: removed LocalAgent

Browse files
Files changed (2) hide show
  1. agent.py +0 -73
  2. requirements.txt +1 -6
agent.py CHANGED
@@ -3,10 +3,6 @@
3
  import os
4
  from typing import Optional
5
  import pandas as pd
6
- import torch
7
-
8
- # Local agent specific
9
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
10
 
11
  # Smolagents imports
12
  from smolagents import (
@@ -191,72 +187,3 @@ class GeminiAgent:
191
  )
192
 
193
  return self.gemini_agent.run(prompt)
194
-
195
- class LocalAgent:
196
- def __init__(self):
197
- checkpoint = "Qwen/Qwen2.5-7B-Instruct"
198
- quantized_model_dir = "./quantized_model"
199
-
200
- # Define the quantized configuration
201
- bnb_config = BitsAndBytesConfig(
202
- load_in_4bit = True,
203
- bnb_4bit_quant_type = "nf4",
204
- bnb_4bit_compute_dtype = torch.bfloat16,
205
- bnb_4bit_use_double_quant = True,
206
- )
207
-
208
- # Load quantized model and tokenizer
209
- temp_model = AutoModelForCausalLM.from_pretrained(
210
- checkpoint,
211
- quantization_config = bnb_config,
212
- device_map="auto" # use multiple GPUs if available
213
- )
214
- temp_tokenizer = AutoTokenizer.from_pretrained(checkpoint)
215
-
216
- # Save the model in local path (seems like it's the only way to make it work with TransformersModel)
217
- temp_model.save_pretrained(quantized_model_dir)
218
- temp_tokenizer.save_pretrained(quantized_model_dir)
219
-
220
- self.system_prompt = SYSTEM_PROMPT
221
-
222
- self.model = TransformersModel(
223
- model_path = quantized_model_dir,
224
- temperature = 0.1,
225
- top_p = 0.95,
226
- device_map = "auto",
227
- max_new_tokens = 8196 # https://github.com/huggingface/smolagents/issues/414#:~:text=Running%20with%20TransformersModel%20does%20not%20work
228
- )
229
- self.tools = AGENT_TOOLS
230
-
231
- self.local_agent = CodeAgent(
232
- model=self.model,
233
- tools=tools,
234
- add_base_tools=True, # probably redundant, but it does not hurt
235
- max_steps=5,
236
- additional_authorized_imports = ['numpy','subprocess', 're', 'pandas',
237
- 'json', 'os', 'pathlib', 'tempfile',
238
- # 'matplotlib.pyplot', 'seaborn'
239
- ],
240
- verbosity_level = 1,
241
- max_print_outputs_length=1_000_000
242
- )
243
-
244
- print("✅ Local (quantized) agent initialized.")
245
-
246
- def __call__(self, question: str, file_path: Optional[str] = None) -> str:
247
-
248
- if file_path:
249
- # Inject system prompt + question and (optional) file path
250
- prompt = (
251
- f"{self.system_prompt}\n\n"
252
- f"Question: {question}\n\n"
253
- f"There is an associated file at path: {file_path}.\n"
254
- f"Use the appropriate tool to download it (if necessary) and read it before answering"
255
- )
256
- else:
257
- prompt = (
258
- f"{self.system_prompt}\n\n"
259
- f"Question: {question}\n\n"
260
- )
261
-
262
- return self.local_agent.run(prompt)
 
3
  import os
4
  from typing import Optional
5
  import pandas as pd
 
 
 
 
6
 
7
  # Smolagents imports
8
  from smolagents import (
 
187
  )
188
 
189
  return self.gemini_agent.run(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -25,9 +25,4 @@ pytubefix==10.3.6
25
  openai-whisper==20250625
26
 
27
  # OCR (OPTIONAL, disabled)
28
- # pytesseract==0.3.13
29
-
30
- # Additional for LocalAgent (optional)
31
- transformers==4.1.0
32
- bitsandbytes==0.49.0
33
- torch==2.6.0
 
25
  openai-whisper==20250625
26
 
27
  # OCR (OPTIONAL, disabled)
28
+ # pytesseract==0.3.13