Spaces:
Configuration error
Configuration error
oremaz
committed on
Commit
·
23becb6
1
Parent(s):
3c74781
Update agent.py
Browse files
agent.py
CHANGED
|
@@ -64,7 +64,7 @@ logging.basicConfig(level=logging.INFO)
|
|
| 64 |
logging.getLogger("llama_index.core.agent").setLevel(logging.DEBUG)
|
| 65 |
logging.getLogger("llama_index.llms").setLevel(logging.DEBUG)
|
| 66 |
|
| 67 |
-
model_id = "
|
| 68 |
proj_llm = HuggingFaceLLM(
|
| 69 |
model_name=model_id,
|
| 70 |
tokenizer_name=model_id,
|
|
@@ -399,7 +399,14 @@ from llama_index.llms.huggingface import HuggingFaceLLM
|
|
| 399 |
# --- 1. Initialize a dedicated LLM for Code Generation ---
|
| 400 |
# It's good practice to use a model specifically fine-tuned for coding.
|
| 401 |
# This model is loaded only once for efficiency.
|
| 402 |
-
code_llm =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 403 |
|
| 404 |
def generate_python_code(query: str) -> str:
|
| 405 |
"""
|
|
|
|
| 64 |
logging.getLogger("llama_index.core.agent").setLevel(logging.DEBUG)
|
| 65 |
logging.getLogger("llama_index.llms").setLevel(logging.DEBUG)
|
| 66 |
|
| 67 |
+
model_id = "Qwen/Qwen2.5-7B-Instruct"
|
| 68 |
proj_llm = HuggingFaceLLM(
|
| 69 |
model_name=model_id,
|
| 70 |
tokenizer_name=model_id,
|
|
|
|
| 399 |
# --- 1. Initialize a dedicated LLM for Code Generation ---
|
| 400 |
# It's good practice to use a model specifically fine-tuned for coding.
|
| 401 |
# This model is loaded only once for efficiency.
|
| 402 |
+
code_llm = HuggingFaceLLM(
|
| 403 |
+
model_name="Qwen/Qwen2.5-Coder-3B",
|
| 404 |
+
tokenizer_name="Qwen/Qwen2.5-Coder-3B",
|
| 405 |
+
device_map="auto",
|
| 406 |
+
model_kwargs={"torch_dtype": "auto"},
|
| 407 |
+
# Set generation parameters for precise, non-creative code output
|
| 408 |
+
generate_kwargs={"temperature": 0.0, "do_sample": False}
|
| 409 |
+
)
|
| 410 |
|
| 411 |
def generate_python_code(query: str) -> str:
|
| 412 |
"""
|