Update app.py
Browse files
app.py
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
|
| 2 |
-
|
| 3 |
# app.py
|
| 4 |
|
| 5 |
# AI Agent Framework Imports
|
|
@@ -83,15 +82,38 @@ def toggle_auto_execution(enable: bool) -> str:
|
|
| 83 |
|
| 84 |
# --------------------------------------------
|
| 85 |
# Configure the AI Model.
|
|
|
|
| 86 |
model = HfApiModel(
|
| 87 |
max_tokens=2096, # Maximum response length.
|
| 88 |
temperature=0.5, # Controls response randomness.
|
| 89 |
-
|
|
|
|
|
|
|
|
|
|
| 90 |
custom_role_conversions=None
|
| 91 |
)
|
| 92 |
|
| 93 |
# Load prompt templates from a YAML configuration file.
|
| 94 |
with open("prompts.yaml", 'r') as stream:
|
| 95 |
prompt_templates = yaml.safe_load(stream)
|
| 96 |
-
#
|
| 97 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
|
|
|
|
| 2 |
# app.py
|
| 3 |
|
| 4 |
# AI Agent Framework Imports
|
|
|
|
# --------------------------------------------
# Configure the AI model.
# If the agent stops answering because the shared model is overloaded,
# switch between the dedicated endpoint and the original hub id below.
model = HfApiModel(
    max_tokens=2096,   # Maximum length of a generated response.
    temperature=0.5,   # Sampling temperature; lower values are more deterministic.
    # Dedicated inference endpoint, used while the primary model is overloaded:
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    # To revert to the original shared model, use instead:
    # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,  # No custom chat-role remapping configured.
)
|
# Load the agent's prompt templates from a YAML configuration file.
# safe_load is used so the config cannot instantiate arbitrary Python objects.
with open("prompts.yaml", 'r') as config_file:
    prompt_templates = yaml.safe_load(config_file)
|
|
|
|
# Assemble the AI agent: the model configured above plus the toolset
# defined earlier in this module.
agent = CodeAgent(
    model=model,
    tools=[
        FinalAnswerTool(),
        detect_ambiguity,
        explain_assumed_knowledge,
        highlight_elements,
        explain_code_line,
        teacher_box_query,
        toggle_auto_execution,
    ],
    max_steps=6,          # Cap on reasoning/action steps per query.
    verbosity_level=1,    # Moderate logging detail from the agent.
    prompt_templates=prompt_templates,
)
|
| 116 |
+
|
| 117 |
+
# Launch the interactive UI using Gradio.
|
| 118 |
+
if __name__ == "__main__":
|
| 119 |
+
GradioUI(agent).launch(server_name="0.0.0.0", server_port=7860)
|