mwill-AImission committed on
Commit
8837bc4
·
verified ·
1 Parent(s): 2d57c30

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -17
app.py CHANGED
@@ -1,54 +1,82 @@
 
1
 
2
- # AI Agent Framework Imports
 
 
3
  from smolagents import CodeAgent, HfApiModel, tool
4
 
5
- # Standard Library Imports
 
 
6
  import yaml
7
 
8
- # Final Answer and UI Handling
 
 
 
9
  from tools.final_answer import FinalAnswerTool
10
  from Gradio_UI import GradioUI
11
 
12
- # --------------------------------------------
13
- # Tool: simplify_text
 
 
14
  @tool
15
  def simplify_text(text: str) -> str:
16
- """Simplifies a technical sentence or paragraph into plain language.
17
-
 
18
  Args:
19
  text: A technical sentence or paragraph.
20
 
21
  Returns:
22
  A simplified version of the text.
23
  """
 
 
24
  return "Simplified: " + text
25
 
26
- # --------------------------------------------
27
- # Configure the AI Model.
28
- # If the primary model is overloaded, you may later switch the model_id to an alternative endpoint.
 
29
  model = HfApiModel(
30
- max_tokens=2096,
31
- temperature=0.5,
 
 
32
  model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
 
 
33
  custom_role_conversions=None
34
  )
35
 
36
- # Load prompt templates from a YAML configuration file.
 
 
37
  with open("prompts.yaml", 'r') as stream:
38
  prompt_templates = yaml.safe_load(stream)
39
 
40
- # Create the AI agent with the defined tools.
 
 
 
 
 
41
  agent = CodeAgent(
42
  model=model,
43
  tools=[
44
  FinalAnswerTool(),
45
  simplify_text
46
  ],
47
- max_steps=6,
48
- verbosity_level=1,
49
  prompt_templates=prompt_templates
50
  )
51
 
52
- # Launch the interactive UI using Gradio.
 
 
 
53
  if __name__ == "__main__":
54
  GradioUI(agent).launch(server_name="0.0.0.0", server_port=7860)
 
1
+ # app.py
2
 
3
+ # --------------------------------------------
4
+ # Importing necessary parts of our AI framework.
5
+ # These help us create our "agent" (our helpful assistant) and define "tools" (small functions that do specific tasks).
6
  from smolagents import CodeAgent, HfApiModel, tool
7
 
8
+ # --------------------------------------------
9
+ # Importing a library to work with YAML files.
10
+ # YAML files are used to store settings and instructions in a simple text format.
11
  import yaml
12
 
13
+ # --------------------------------------------
14
+ # Importing our FinalAnswerTool and GradioUI.
15
+ # FinalAnswerTool helps produce the final output that the user sees.
16
+ # GradioUI sets up a simple webpage so users can talk to our agent.
17
  from tools.final_answer import FinalAnswerTool
18
  from Gradio_UI import GradioUI
19
 
# ------------------------------------------------------------------------------------
# Tool definition: simplify_text.
# The @tool decorator registers this function with the agent framework so the
# agent can call it by name; the docstring below doubles as the tool's
# description and argument schema, so its wording is part of the contract.
@tool
def simplify_text(text: str) -> str:
    """
    Simplifies a technical sentence or paragraph into plain language.

    Args:
        text: A technical sentence or paragraph.

    Returns:
        A simplified version of the text.
    """
    # Placeholder implementation: tag the input rather than rewriting it.
    # Replace this with real simplification logic when ready.
    return f"Simplified: {text}"
38
 
39
+ # ------------------------------------------------------------------------------------
40
+ # Now we set up the AI model.
41
+ # Think of the model as the "brain" of our agent that understands questions and comes up with answers.
42
+ # We use a model hosted at a specific web address.
43
  model = HfApiModel(
44
+ max_tokens=2096, # This sets a limit on how long the model's answer can be.
45
+ temperature=0.5, # This controls the creativity of the answer (0.5 is moderate).
46
+ # We are using an alternative endpoint here. If the original model is overloaded,
47
+ # you can use this endpoint instead.
48
  model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
49
+ # If you ever want to switch back to the original model, you could use:
50
+ # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
51
  custom_role_conversions=None
52
  )
53
 
54
+ # ------------------------------------------------------------------------------------
55
+ # Next, we load some prompt templates from a file called "prompts.yaml".
56
+ # Prompt templates act like a script that tells the model how to behave.
57
  with open("prompts.yaml", 'r') as stream:
58
  prompt_templates = yaml.safe_load(stream)
59
 
60
+ # ------------------------------------------------------------------------------------
61
+ # Here we create our AI agent.
62
+ # The agent is like your digital helper that uses the model (its brain) and the tools (its abilities) to answer questions.
63
+ # We give it two tools:
64
+ # - FinalAnswerTool: This tool helps package the final answer.
65
+ # - simplify_text: Our custom tool that makes technical language simpler.
66
  agent = CodeAgent(
67
  model=model,
68
  tools=[
69
  FinalAnswerTool(),
70
  simplify_text
71
  ],
72
+ max_steps=6, # This sets a limit on how many steps the agent can take to come up with an answer.
73
+ verbosity_level=1, # This controls how detailed the agent's inner thoughts are (for debugging or learning).
74
  prompt_templates=prompt_templates
75
  )
76
 
77
+ # ------------------------------------------------------------------------------------
78
+ # Finally, we start the user interface so that people can interact with the agent.
79
+ # Gradio makes a simple webpage where you can type questions and see answers.
80
+ # The following line tells Gradio to listen on all network addresses (0.0.0.0) at port 7860.
81
  if __name__ == "__main__":
82
  GradioUI(agent).launch(server_name="0.0.0.0", server_port=7860)