Update app.py
Browse files
app.py
CHANGED
|
@@ -1,82 +1,81 @@
|
|
| 1 |
-
# app.py
|
| 2 |
|
| 3 |
-
#
|
| 4 |
-
# Importing necessary parts of our AI framework.
|
| 5 |
-
# These help us create our "agent" (our helpful assistant) and define "tools" (small functions that do specific tasks).
|
| 6 |
from smolagents import CodeAgent, HfApiModel, tool
|
| 7 |
|
| 8 |
-
#
|
| 9 |
-
#
|
| 10 |
-
# YAML files are used to store settings and instructions in a simple text format.
|
| 11 |
import yaml
|
| 12 |
|
| 13 |
-
#
|
| 14 |
-
#
|
| 15 |
-
# FinalAnswerTool helps produce the final output that the user sees.
|
| 16 |
-
# GradioUI sets up a simple webpage so users can talk to our agent.
|
| 17 |
from tools.final_answer import FinalAnswerTool
|
| 18 |
from Gradio_UI import GradioUI
|
| 19 |
|
| 20 |
-
#
|
| 21 |
-
#
|
| 22 |
-
#
|
| 23 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
@tool
|
| 25 |
def simplify_text(text: str) -> str:
|
| 26 |
"""
|
| 27 |
-
|
| 28 |
|
| 29 |
Args:
|
| 30 |
text: A technical sentence or paragraph.
|
| 31 |
-
|
| 32 |
Returns:
|
| 33 |
-
A simplified version of the text.
|
| 34 |
"""
|
| 35 |
-
#
|
| 36 |
-
#
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
#
|
| 47 |
-
|
| 48 |
-
model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
|
| 49 |
-
# If you ever want to switch back to the original model, you could use:
|
| 50 |
-
# model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 51 |
-
custom_role_conversions=None
|
| 52 |
-
)
|
| 53 |
|
| 54 |
-
#
|
| 55 |
-
#
|
| 56 |
-
#
|
| 57 |
with open("prompts.yaml", 'r') as stream:
|
| 58 |
prompt_templates = yaml.safe_load(stream)
|
| 59 |
|
| 60 |
-
#
|
| 61 |
-
#
|
| 62 |
-
#
|
| 63 |
-
#
|
| 64 |
-
# - FinalAnswerTool: This tool helps package the final answer.
|
| 65 |
-
# - simplify_text: Our custom tool that makes technical language simpler.
|
| 66 |
agent = CodeAgent(
|
| 67 |
model=model,
|
| 68 |
tools=[
|
| 69 |
FinalAnswerTool(),
|
| 70 |
simplify_text
|
| 71 |
],
|
| 72 |
-
max_steps=6, #
|
| 73 |
-
verbosity_level=1, #
|
| 74 |
prompt_templates=prompt_templates
|
| 75 |
)
|
| 76 |
|
| 77 |
-
#
|
| 78 |
-
#
|
| 79 |
-
# Gradio
|
| 80 |
-
# The
|
| 81 |
if __name__ == "__main__":
|
| 82 |
GradioUI(agent).launch(server_name="0.0.0.0", server_port=7860)
|
|
|
|
|
|
|
| 1 |
|
| 2 |
+
# Import necessary parts of our AI framework to create our agent and define our tools.
|
|
|
|
|
|
|
| 3 |
from smolagents import CodeAgent, HfApiModel, tool
|
| 4 |
|
| 5 |
+
# ------------------------------------------------------------------------------
|
| 6 |
+
# Import YAML to load additional instructions from a file.
|
|
|
|
| 7 |
import yaml
|
| 8 |
|
| 9 |
+
# ------------------------------------------------------------------------------
|
| 10 |
+
# Import our helper tool for producing the final answer and the user interface.
|
|
|
|
|
|
|
| 11 |
from tools.final_answer import FinalAnswerTool
|
| 12 |
from Gradio_UI import GradioUI
|
| 13 |
|
| 14 |
+
# ------------------------------------------------------------------------------
|
| 15 |
+
# Set up the AI model (the "brain" of our agent).
# The model processes text based on prompts and produces answers.
# We use an alternative (dedicated) inference endpoint if needed.
model = HfApiModel(
    # Maximum length of the generated answer, in tokens.
    # NOTE(review): 2096 looks like a typo for the usual 2048 — confirm intent.
    max_tokens=2096,
    # Sampling temperature: controls how creative the answer is
    # (lower = more deterministic, higher = more varied).
    temperature=0.5,
    # Hugging Face inference endpoint URL used as the model backend.
    # To switch models, replace this with another model_id,
    # e.g. 'Qwen/Qwen2.5-Coder-32B-Instruct'.
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    # Presumably disables remapping of chat roles before sending requests —
    # verify against the smolagents HfApiModel documentation.
    custom_role_conversions=None
)
|
| 25 |
+
|
| 26 |
+
# ------------------------------------------------------------------------------
|
| 27 |
+
# Define the simplify_text tool.
# Given technical text, it asks the LLM for a plain-language rewrite and for
# simple definitions of any unavoidable jargon.
@tool
def simplify_text(text: str) -> str:
    """
    Converts technical text into plain language, avoiding jargon and explaining any technical terms.

    Args:
        text: A technical sentence or paragraph.

    Returns:
        A simplified version of the text in common language.
    """
    # Fixed instruction telling the model how to rewrite the input:
    # plain language only, and any necessary jargon must be defined simply.
    instructions = (
        "Convert the following technical text into plain, common language. Avoid technical jargon. "
        "If you must use technical terms, please define them in simple language:\n\n"
    )
    # Append the user's text and a cue marking where the rewrite should go.
    query = instructions + f"{text}\n\nSimplified version:"
    # Send the assembled prompt to the shared module-level model and
    # return its response as the simplified text.
    return model(query)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
+
# ------------------------------------------------------------------------------
|
| 56 |
+
# Load prompt templates from the 'prompts.yaml' file.
# These templates provide extra instructions that help guide the model.
# safe_load is used so the YAML cannot execute arbitrary Python objects.
with open("prompts.yaml", 'r') as template_file:
    prompt_templates = yaml.safe_load(template_file)
|
| 60 |
|
| 61 |
+
# ------------------------------------------------------------------------------
|
| 62 |
+
# Create our AI agent by combining the model with our tools.
# - FinalAnswerTool: packages the final answer shown to the user.
# - simplify_text: our custom tool that rephrases technical text into
#   simple language.
agent = CodeAgent(
    model=model,  # The HfApiModel configured above.
    tools=[
        FinalAnswerTool(),
        simplify_text
    ],
    max_steps=6,  # Limit the number of steps the agent can use to produce an answer.
    verbosity_level=1,  # Lower verbosity for a cleaner output.
    prompt_templates=prompt_templates  # Extra instructions loaded from prompts.yaml.
)
|
| 75 |
|
| 76 |
+
# ------------------------------------------------------------------------------
|
| 77 |
+
# Launch the interactive user interface.
# Gradio serves a simple webpage where users type in text and see the
# agent's answer. The server listens on all network interfaces (0.0.0.0)
# on port 7860.
if __name__ == "__main__":
    ui = GradioUI(agent)
    ui.launch(server_name="0.0.0.0", server_port=7860)
|