carloscapote commited on
Commit
b08871e
·
1 Parent(s): f5d0a6f

add an rss reader tool and enable local runs with ollama

Browse files
Files changed (8) hide show
  1. .gitignore +1 -0
  2. Gradio_UI.py +1 -1
  3. README.md +26 -0
  4. app.py +28 -28
  5. huggingface.env +3 -0
  6. ollama.env +5 -0
  7. requirements.txt +3 -0
  8. tools/read_rss_feed.py +23 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
1
  /.gradio
2
  /.venv
 
3
  __pycache__
 
1
  /.gradio
2
  /.venv
3
+ /.env
4
  __pycache__
Gradio_UI.py CHANGED
@@ -290,7 +290,7 @@ class GradioUI:
290
  [stored_messages, text_input],
291
  ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
292
 
293
- demo.launch(debug=True, share=True, **kwargs)
294
 
295
 
296
  __all__ = ["stream_to_gradio", "GradioUI"]
 
290
  [stored_messages, text_input],
291
  ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
292
 
293
+ demo.launch(debug=True, share=os.getenv("GRADIO_SHARE") == "true", **kwargs)
294
 
295
 
296
  __all__ = ["stream_to_gradio", "GradioUI"]
README.md CHANGED
@@ -31,4 +31,30 @@ source .venv/bin/activate
31
  pip install -r requirements.txt
32
  ```
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  For additional configuration, check out HuggingFace's [spaces configuration reference](https://huggingface.co/docs/hub/spaces-config-reference).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  pip install -r requirements.txt
32
  ```
33
 
34
+ ## Configuration
35
+
36
+ ### Run locally against a HuggingFace model
37
+
38
+ If you want to run this locally against a model hosted by HuggingFace, you'll have to set up a [user access token](https://huggingface.co/docs/hub/security-tokens).
39
+
40
+ ```bash
41
+ # Copy the `huggingface.env` as `.env`
42
+ cp huggingface.env .env
43
+
44
+ # Edit the file and set your `HF_TOKEN`
45
+ ```
46
+
47
  For additional configuration, check out HuggingFace's [spaces configuration reference](https://huggingface.co/docs/hub/spaces-config-reference).
48
+
49
+ ### Run locally against a self-hosted Ollama model
50
+
51
+ ```bash
52
+ # Copy the `ollama.env` as `.env`
53
+ cp ollama.env .env
54
+
55
+ # Edit the file and set your `OLLAMA_ENDPOINT` and `OLLAMA_MODEL`
56
+ ```
57
+
58
+ ### General settings
59
+
60
+ Set the value of `GRADIO_SHARE` to `true` if you want to share your Gradio application.
app.py CHANGED
@@ -1,22 +1,16 @@
1
- from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
6
- from tools.final_answer import FinalAnswerTool
 
7
 
 
 
8
  from Gradio_UI import GradioUI
9
 
10
- # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
- @tool
12
- def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
13
- #Keep this format for the description / args / args description but feel free to modify the tool
14
- """A tool that does nothing yet
15
- Args:
16
- arg1: the first argument
17
- arg2: the second argument
18
- """
19
- return "What magic will you build ?"
20
 
21
  @tool
22
  def get_current_time_in_timezone(timezone: str) -> str:
@@ -33,29 +27,36 @@ def get_current_time_in_timezone(timezone: str) -> str:
33
  except Exception as e:
34
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
35
 
36
-
37
- final_answer = FinalAnswerTool()
38
-
39
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
40
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
41
 
42
- model = HfApiModel(
43
- max_tokens=2096,
44
- temperature=0.5,
45
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
46
- custom_role_conversions=None,
47
- )
 
 
 
 
 
 
 
 
 
 
48
 
 
 
49
 
50
- # Import tool from Hub
51
- image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
52
 
53
- with open("prompts.yaml", 'r') as stream:
54
- prompt_templates = yaml.safe_load(stream)
55
-
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
@@ -65,5 +66,4 @@ agent = CodeAgent(
65
  prompt_templates=prompt_templates
66
  )
67
 
68
-
69
  GradioUI(agent).launch()
 
1
+ import os
2
  import datetime
3
  import requests
4
  import pytz
5
  import yaml
6
+ from dotenv import load_dotenv
7
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, LiteLLMModel, tool
8
 
9
+ from tools.read_rss_feed import ReadRssFeedTool
10
+ from tools.final_answer import FinalAnswerTool
11
  from Gradio_UI import GradioUI
12
 
13
+ load_dotenv()
 
 
 
 
 
 
 
 
 
14
 
15
  @tool
16
  def get_current_time_in_timezone(timezone: str) -> str:
 
27
  except Exception as e:
28
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
29
 
 
 
 
30
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
31
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
32
 
33
+ def choose_model():
34
+ if os.getenv("HF_TOKEN"):
35
+ print("Using HuggingFace")
36
+ return HfApiModel(
37
+ max_tokens=2096,
38
+ temperature=0.5,
39
+ model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
40
+ custom_role_conversions=None,
41
+ )
42
+ else:
43
+ print("Using Ollama")
44
+ return LiteLLMModel(
45
+ model_id=os.getenv("OLLAMA_MODEL"),
46
+ api_base=os.getenv("OLLAMA_ENDPOINT"),
47
+ api_key=os.getenv("OLLAMA_KEY"),
48
+ )
49
 
50
+ with open("prompts.yaml", "r") as stream:
51
+ prompt_templates = yaml.safe_load(stream)
52
 
53
+ model = choose_model()
54
+ read_rss_feed = ReadRssFeedTool()
55
+ final_answer = FinalAnswerTool()
56
 
 
 
 
57
  agent = CodeAgent(
58
  model=model,
59
+ tools=[read_rss_feed, final_answer],
60
  max_steps=6,
61
  verbosity_level=1,
62
  grammar=None,
 
66
  prompt_templates=prompt_templates
67
  )
68
 
 
69
  GradioUI(agent).launch()
huggingface.env ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ HF_TOKEN=hf_This_Is_A_Fake_Token
2
+
3
+ GRADIO_SHARE=false
ollama.env ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ OLLAMA_ENDPOINT="http://localhost:11434"
2
+ OLLAMA_MODEL="ollama_chat/llama3-groq-tool-use"
3
+ OLLAMA_KEY=""
4
+
5
+ GRADIO_SHARE=false
requirements.txt CHANGED
@@ -1,5 +1,8 @@
1
  markdownify
2
  smolagents
 
 
3
  requests
4
  duckduckgo_search
5
  pandas
 
 
1
  markdownify
2
  smolagents
3
+ smolagents[gradio]
4
+ smolagents[litellm]
5
  requests
6
  duckduckgo_search
7
  pandas
8
+ rss_parser
tools/read_rss_feed.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from smolagents.tools import Tool


class ReadRssFeedTool(Tool):
    """Tool that downloads an RSS feed and renders its items as Markdown."""

    name = "read_rss_feed"
    description = "Read the articles from an RSS feed."
    inputs = {'url': {'type': 'string', 'description': 'The url of the RSS feed (example: https://www.eldiario.es/rss).'}}
    output_type = "string"

    def forward(self, url: str) -> str:
        """Read articles from an RSS feed.

        Args:
            url: Address of the RSS feed to download and parse.

        Returns:
            A Markdown document with one section per feed item: the item
            title as a heading, its publication date, and its description
            converted from HTML to Markdown.
        """
        # Imported lazily so the class can be declared without these
        # optional dependencies installed.
        from requests import get
        from rss_parser import RSSParser
        from markdownify import markdownify

        response = get(url)
        # Fail fast on HTTP errors instead of feeding an error page to the parser.
        response.raise_for_status()
        rss = RSSParser.parse(response.text)

        # Collect fragments and join once — avoids quadratic string concatenation.
        parts = ["# Articles\n\n"]
        for item in rss.channel.items:
            parts.append("## " + item.title.content + "\n")
            parts.append(item.pub_date.content + "\n\n")
            parts.append(markdownify(item.description.content).strip() + "\n\n")
        return "".join(parts)