mwill-AImission committed on
Commit
536a8b2
·
verified ·
1 Parent(s): 8837bc4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -51
app.py CHANGED
@@ -1,82 +1,81 @@
1
- # app.py
2
 
3
- # --------------------------------------------
4
- # Importing necessary parts of our AI framework.
5
- # These help us create our "agent" (our helpful assistant) and define "tools" (small functions that do specific tasks).
6
  from smolagents import CodeAgent, HfApiModel, tool
7
 
8
- # --------------------------------------------
9
- # Importing a library to work with YAML files.
10
- # YAML files are used to store settings and instructions in a simple text format.
11
  import yaml
12
 
13
- # --------------------------------------------
14
- # Importing our FinalAnswerTool and GradioUI.
15
- # FinalAnswerTool helps produce the final output that the user sees.
16
- # GradioUI sets up a simple webpage so users can talk to our agent.
17
  from tools.final_answer import FinalAnswerTool
18
  from Gradio_UI import GradioUI
19
 
20
- # ------------------------------------------------------------------------------------
21
- # Here we define a tool called simplify_text.
22
- # A "tool" is a small piece of code that performs one job.
23
- # In this case, the job is to take technical text and make it look simpler.
 
 
 
 
 
 
 
 
 
 
 
 
24
  @tool
25
  def simplify_text(text: str) -> str:
26
  """
27
- Simplifies a technical sentence or paragraph into plain language.
28
 
29
  Args:
30
  text: A technical sentence or paragraph.
31
-
32
  Returns:
33
- A simplified version of the text.
34
  """
35
- # This simple version just adds "Simplified:" before the original text.
36
- # Later, you can make this more advanced by changing the wording.
37
- return "Simplified: " + text
38
-
39
- # ------------------------------------------------------------------------------------
40
- # Now we set up the AI model.
41
- # Think of the model as the "brain" of our agent that understands questions and comes up with answers.
42
- # We use a model hosted at a specific web address.
43
- model = HfApiModel(
44
- max_tokens=2096, # This sets a limit on how long the model's answer can be.
45
- temperature=0.5, # This controls the creativity of the answer (0.5 is moderate).
46
- # We are using an alternative endpoint here. If the original model is overloaded,
47
- # you can use this endpoint instead.
48
- model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
49
- # If you ever want to switch back to the original model, you could use:
50
- # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
51
- custom_role_conversions=None
52
- )
53
 
54
- # ------------------------------------------------------------------------------------
55
- # Next, we load some prompt templates from a file called "prompts.yaml".
56
- # Prompt templates act like a script that tells the model how to behave.
57
  with open("prompts.yaml", 'r') as stream:
58
  prompt_templates = yaml.safe_load(stream)
59
 
60
- # ------------------------------------------------------------------------------------
61
- # Here we create our AI agent.
62
- # The agent is like your digital helper that uses the model (its brain) and the tools (its abilities) to answer questions.
63
- # We give it two tools:
64
- # - FinalAnswerTool: This tool helps package the final answer.
65
- # - simplify_text: Our custom tool that makes technical language simpler.
66
  agent = CodeAgent(
67
  model=model,
68
  tools=[
69
  FinalAnswerTool(),
70
  simplify_text
71
  ],
72
- max_steps=6, # This sets a limit on how many steps the agent can take to come up with an answer.
73
- verbosity_level=1, # This controls how detailed the agent's inner thoughts are (for debugging or learning).
74
  prompt_templates=prompt_templates
75
  )
76
 
77
- # ------------------------------------------------------------------------------------
78
- # Finally, we start the user interface so that people can interact with the agent.
79
- # Gradio makes a simple webpage where you can type questions and see answers.
80
- # The following line tells Gradio to listen on all network addresses (0.0.0.0) at port 7860.
81
  if __name__ == "__main__":
82
  GradioUI(agent).launch(server_name="0.0.0.0", server_port=7860)
 
 
1
 
2
+ # Import necessary parts of our AI framework to create our agent and define our tools.
 
 
3
  from smolagents import CodeAgent, HfApiModel, tool
4
 
5
+ # ------------------------------------------------------------------------------
6
+ # Import YAML to load additional instructions from a file.
 
7
  import yaml
8
 
9
+ # ------------------------------------------------------------------------------
10
+ # Import our helper tool for producing the final answer and the user interface.
 
 
11
  from tools.final_answer import FinalAnswerTool
12
  from Gradio_UI import GradioUI
13
 
14
# ------------------------------------------------------------------------------
# Configure the language model — the reasoning engine behind the agent.
# The model_id points at a hosted inference endpoint; to use a different
# model, replace it with another identifier (e.g. a Hub repo id such as
# 'Qwen/Qwen2.5-Coder-32B-Instruct').
model = HfApiModel(
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    max_tokens=2096,                # Upper bound on the length of a generated answer.
    temperature=0.5,                # Moderate sampling creativity.
    custom_role_conversions=None,   # No remapping of chat roles.
)
25
+
26
# ------------------------------------------------------------------------------
# Define the simplify_text tool.
# A tool is a small, single-purpose function the agent can invoke.  This one
# asks the LLM to rewrite technical text in plain language and to define any
# unavoidable jargon in simple terms.
@tool
def simplify_text(text: str) -> str:
    """
    Converts technical text into plain language, avoiding jargon and explaining any technical terms.

    Args:
        text: A technical sentence or paragraph.

    Returns:
        A simplified version of the text in common language.
    """
    # Wrap the user's text in an instruction prompt that tells the model to
    # rephrase it simply and define any technical terms it must keep.
    prompt = (
        "Convert the following technical text into plain, common language. Avoid technical jargon. "
        "If you must use technical terms, please define them in simple language:\n\n"
        f"{text}\n\nSimplified version:"
    )
    # NOTE(review): smolagents model wrappers typically expect a list of chat
    # messages and return a message object rather than a raw string — confirm
    # that calling the model with a bare prompt string works for the pinned
    # smolagents version.
    response = model(prompt)
    # Coerce the result to plain text so the declared `-> str` contract holds
    # even when the model returns a message object carrying a `.content` field.
    content = getattr(response, "content", None)
    return content if isinstance(content, str) else str(response)
 
 
 
 
 
54
 
55
# ------------------------------------------------------------------------------
# Load the prompt templates that guide the model's behavior from disk.
with open("prompts.yaml", 'r') as fh:
    prompt_templates = yaml.safe_load(fh)
60
 
61
# ------------------------------------------------------------------------------
# Assemble the agent: the model supplies the reasoning, the tools supply the
# abilities — FinalAnswerTool packages the final reply, simplify_text rewrites
# technical text in plain language.
agent = CodeAgent(
    model=model,
    tools=[FinalAnswerTool(), simplify_text],
    max_steps=6,              # Cap on reasoning steps per query.
    verbosity_level=1,        # Modest logging detail.
    prompt_templates=prompt_templates,
)
75
 
76
# ------------------------------------------------------------------------------
# Launch the interactive web interface when this file is run as a script.
# Gradio serves a simple page where users can submit text to the agent;
# it listens on all network interfaces (0.0.0.0) at port 7860.
if __name__ == "__main__":
    ui = GradioUI(agent)
    ui.launch(server_name="0.0.0.0", server_port=7860)