File size: 4,471 Bytes
d1dfb02
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
from smolagents import DuckDuckGoSearchTool
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin



# Initialize the DuckDuckGo search tool (smolagents built-in).
# Instantiated at import time so it can be passed straight into a
# CodeAgent's `tools=[...]` list (see the commented example further down).
search_tool = DuckDuckGoSearchTool()

# Example usage — the tool is callable with a plain query string:
# results = search_tool("Who's the current President of France?")
# print(results)


from smolagents import Tool
import random

class WeatherInfoTool(Tool):
    # Tool metadata consumed by smolagents; these strings are part of the
    # tool's interface and must not change.
    name = "weather_info"
    description = "Fetches dummy weather information for a given location."
    inputs = {
        "location": {
            "type": "string",
            "description": "The location to get weather information for."
        }
    }
    output_type = "string"

    def forward(self, location: str):
        """Return a randomly chosen (dummy) weather report for *location*.

        No real weather service is contacted: one of three canned
        observations is picked via ``random.choice``.
        """
        fake_observations = [
            {"condition": "Rainy", "temp_c": 15},
            {"condition": "Clear", "temp_c": 25},
            {"condition": "Windy", "temp_c": 20}
        ]
        picked = random.choice(fake_observations)
        condition, temp = picked["condition"], picked["temp_c"]
        return f"Weather in {location}: {condition}, {temp}°C"

# # Initialize the tool
# weather_info_tool = WeatherInfoTool()


from smolagents import Tool
from huggingface_hub import list_models

class HubStatsTool(Tool):
    # Tool metadata consumed by smolagents; these strings are part of the
    # tool's interface and must not change.
    name = "hub_stats"
    description = "Fetches the most downloaded model from a specific author on the Hugging Face Hub."
    inputs = {
        "author": {
            "type": "string",
            "description": "The username of the model author/organization to find models from."
        }
    }
    output_type = "string"

    def forward(self, author: str):
        """Report the single most-downloaded Hub model for *author*.

        Returns a human-readable sentence on success, a "no models"
        message when the author has none, or an error string if the
        Hub call (or result formatting) raises.
        """
        try:
            # direction=-1 means descending, so with limit=1 the first
            # (and only) entry is the most-downloaded model.
            top = next(
                iter(list_models(author=author, sort="downloads", direction=-1, limit=1)),
                None,
            )
            if top is None:
                return f"No models found for author {author}."
            return f"The most downloaded model by {author} is {top.id} with {top.downloads:,} downloads."
        except Exception as e:
            return f"Error fetching models for {author}: {str(e)}"

# # Initialize the tool
# hub_stats_tool = HubStatsTool()

# # Example usage
# print(hub_stats_tool("facebook")) # Example: Get the most downloaded model by Facebook


# from smolagents import CodeAgent, InferenceClientModel

# # Initialize the Hugging Face model
# model = InferenceClientModel()

# # Create Alfred with all the tools
# alfred = CodeAgent(
#     tools=[search_tool, weather_info_tool, hub_stats_tool], 
#     model=model
# )

# # Example query Alfred might receive during the gala
# response = alfred.run("What is Facebook and what's their most popular model?")

# print("🎩 Alfred's Response:")
# print(response)




class ScrapingWeb(Tool):
    # Tool metadata consumed by smolagents; these strings are part of the
    # tool's interface and must not change.
    name = "scraping_web"
    description = "Scraping the website to extract all text information available"
    inputs = {
        "link": {
            "type": "string",
            "description": "The link of the website to scrape"
        }
    }
    output_type = "string"

    def forward(self, link: str):
        """Fetch *link* and return its visible text content.

        Always returns a string (matching the declared ``output_type``):
        the page's visible text on success, or an error description if
        the HTTP request fails — consistent with HubStatsTool's
        error-string convention, so agent callers never receive None.
        """
        try:
            # 1. Fetch the page (10 s timeout so a dead host can't hang
            #    the agent indefinitely).
            response = requests.get(link, timeout=10)
            response.raise_for_status()  # raise HTTPError for bad status codes
        except requests.RequestException as e:
            # Fix: the original printed the error and returned None,
            # which violated output_type="string"; surface it to the
            # caller instead.
            return f"Error fetching {link}: {e}"

        # 2. Parse with BeautifulSoup
        soup = BeautifulSoup(response.text, "html.parser")

        # 3. Strip non-visible content before text extraction —
        #    soup.get_text() alone would include script/style bodies.
        for script_or_style in soup(["script", "style"]):
            script_or_style.decompose()

        # 4. Return all remaining visible text, one node per line.
        #    (Title/hyperlink extraction from the original was dead code:
        #    the results were never returned, so it has been dropped.)
        return soup.get_text(separator="\n", strip=True)