RemVdH committed on
Commit
8d9dd28
·
verified ·
1 Parent(s): 369c0d5

Update app.py

Browse files

Make the step towards effectively letting it work as an agent

Files changed (1) hide show
  1. app.py +76 -58
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from smolagents import CodeAgent, HfApiModel, tool
4
  from tools.final_answer import FinalAnswerTool
 
5
 
6
  @tool
7
  def search_wikipedia_pages(search_string: str) -> str:
@@ -21,64 +22,81 @@ def search_wikipedia_pages(search_string: str) -> str:
21
 
22
  final_answer = FinalAnswerTool()
23
 
24
- """
25
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
26
- """
27
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
28
-
29
-
30
- def respond(
31
- message,
32
- history: list[tuple[str, str]],
33
- system_message,
34
- max_tokens,
35
- temperature,
36
- top_p,
37
- ):
38
- messages = [{"role": "system", "content": system_message}]
39
-
40
- for val in history:
41
- if val[0]:
42
- messages.append({"role": "user", "content": val[0]})
43
- if val[1]:
44
- messages.append({"role": "assistant", "content": val[1]})
45
-
46
- messages.append({"role": "user", "content": message})
47
-
48
- response = ""
49
-
50
- for message in client.chat_completion(
51
- messages,
52
- max_tokens=max_tokens,
53
- stream=True,
54
- temperature=temperature,
55
- top_p=top_p,
56
- ):
57
- token = message.choices[0].delta.content
58
-
59
- response += token
60
- yield response
61
-
62
-
63
- """
64
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
65
- """
66
- demo = gr.ChatInterface(
67
- respond,
68
- additional_inputs=[
69
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
70
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
71
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
72
- gr.Slider(
73
- minimum=0.1,
74
- maximum=1.0,
75
- value=0.95,
76
- step=0.05,
77
- label="Top-p (nucleus sampling)",
78
- ),
79
- ],
80
  )
81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
- if __name__ == "__main__":
84
- demo.launch()
 
2
  from huggingface_hub import InferenceClient
3
  from smolagents import CodeAgent, HfApiModel, tool
4
  from tools.final_answer import FinalAnswerTool
5
+ from Gradio_UI import GradioUI
6
 
7
  @tool
8
  def search_wikipedia_pages(search_string: str) -> str:
 
22
 
23
  final_answer = FinalAnswerTool()
24
 
25
+ agent = CodeAgent(
26
+ model=model,
27
+ tools=[final_answer, search_wikipedia_pages],
28
+ max_steps=6,
29
+ verbosity_level=1,
30
+ grammar=None,
31
+ planning_interval=None,
32
+ name=None,
33
+ description=None,
34
+ prompt_templates=prompt_templates
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  )
36
 
37
+ GradioUI(agent).launch()
38
+
39
+
40
+
41
+
42
+ # """
43
+ # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
44
+ # """
45
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
46
+
47
+
48
+ # def respond(
49
+ # message,
50
+ # history: list[tuple[str, str]],
51
+ # system_message,
52
+ # max_tokens,
53
+ # temperature,
54
+ # top_p,
55
+ # ):
56
+ # messages = [{"role": "system", "content": system_message}]
57
+
58
+ # for val in history:
59
+ # if val[0]:
60
+ # messages.append({"role": "user", "content": val[0]})
61
+ # if val[1]:
62
+ # messages.append({"role": "assistant", "content": val[1]})
63
+
64
+ # messages.append({"role": "user", "content": message})
65
+
66
+ # response = ""
67
+
68
+ # for message in client.chat_completion(
69
+ # messages,
70
+ # max_tokens=max_tokens,
71
+ # stream=True,
72
+ # temperature=temperature,
73
+ # top_p=top_p,
74
+ # ):
75
+ # token = message.choices[0].delta.content
76
+
77
+ # response += token
78
+ # yield response
79
+
80
+
81
+ # """
82
+ # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
83
+ # """
84
+ # demo = gr.ChatInterface(
85
+ # respond,
86
+ # additional_inputs=[
87
+ # gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
88
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
89
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
90
+ # gr.Slider(
91
+ # minimum=0.1,
92
+ # maximum=1.0,
93
+ # value=0.95,
94
+ # step=0.05,
95
+ # label="Top-p (nucleus sampling)",
96
+ # ),
97
+ # ],
98
+ # )
99
+
100
 
101
+ # if __name__ == "__main__":
102
+ # demo.launch()