Commit 3c74781 · Parent(s): 6eb1ce3
oremaz committed: Update agent.py

agent.py CHANGED
@@ -64,7 +64,7 @@ logging.basicConfig(level=logging.INFO)
 logging.getLogger("llama_index.core.agent").setLevel(logging.DEBUG)
 logging.getLogger("llama_index.llms").setLevel(logging.DEBUG)
 
-model_id = "
+model_id = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
 proj_llm = HuggingFaceLLM(
     model_name=model_id,
     tokenizer_name=model_id,
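
Note on this hunk: the commit pins model_id to an FP8-quantized 17B mixture-of-experts checkpoint that HuggingFaceLLM loads locally. A minimal smoke-test sketch, assuming the proj_llm instance above initializes and that transformers/accelerate plus sufficient GPU memory are available; the prompt string is illustrative, not from the commit:

    # Sanity-check that the swapped-in model responds; in llama_index's LLM
    # interface, complete() returns a CompletionResponse whose .text holds
    # the generated string.
    response = proj_llm.complete("Return the word OK.")
    print(response.text)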
@@ -266,17 +266,28 @@ extract_url_tool = FunctionTool.from_defaults(
 )
 )
 
-
+from llama_index.core.query_pipeline import QueryPipeline, FnComponent
+
+# Convert your functions into pipeline components
+def read_and_parse_fn(input_path: str):
+    """Function compatible with QueryPipeline"""
+    return read_and_parse_content(input_path)
+
+def create_rag_fn(documents):
+    """Function compatible with QueryPipeline"""
+    return create_rag_tool(documents)
+
+# Create the pipeline with FnComponent
 def create_forced_rag_pipeline():
     pipeline = QueryPipeline(verbose=True)
 
-    #
+    # Use FnComponent instead of FunctionTool
     pipeline.add_modules({
-        "read_and_parse":
-        "create_rag":
+        "read_and_parse": FnComponent(fn=read_and_parse_fn),
+        "create_rag": FnComponent(fn=create_rag_fn),
     })
 
-    # Force the link
+    # Force the link
     pipeline.add_link("read_and_parse", "create_rag")
 
     return pipeline
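
Why FnComponent here: it wraps a plain function as a QueryPipeline module and infers the module's input keys from the function's signature, so read_and_parse's output can flow into create_rag through the declared link. A hypothetical end-to-end usage sketch, assuming read_and_parse_content and create_rag_tool are defined elsewhere in agent.py; the file path is illustrative:

    # Run the two-stage pipeline: parse a source file, then build a RAG tool from it.
    pipeline = create_forced_rag_pipeline()
    # The kwarg name matches read_and_parse_fn's parameter, which FnComponent
    # exposes as the root module's input key.
    rag_tool = pipeline.run(input_path="./docs/example.pdf")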
@@ -388,20 +399,7 @@ from llama_index.llms.huggingface import HuggingFaceLLM
 # --- 1. Initialize a dedicated LLM for Code Generation ---
 # It's good practice to use a model specifically fine-tuned for coding.
 # This model is loaded only once for efficiency.
-
-code_llm = HuggingFaceLLM(
-    model_name="Qwen/Qwen2.5-Coder-3B",
-    tokenizer_name="Qwen/Qwen2.5-Coder-3B",
-    device_map="auto",
-    model_kwargs={"torch_dtype": "auto"},
-    # Set generation parameters for precise, non-creative code output
-    generate_kwargs={"temperature": 0.1, "do_sample": False}
-)
-except Exception as e:
-    print(f"Error initializing code generation model: {e}")
-    print("Code generation tool will not be available.")
-    code_llm = None
-
+code_llm = proj_llm
 
 def generate_python_code(query: str) -> str:
     """
|