import requests
from bs4 import BeautifulSoup
import json  # NOTE(review): currently unused — kept in case other callers rely on module side effects
import os    # NOTE(review): currently unused


# Default network timeout (seconds). requests has NO default timeout,
# so omitting this can hang a call forever on a stalled connection.
_TIMEOUT = 10

_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}


class WebSearchTools:
    """Small helpers for image search, image download, and page scraping.

    All methods are static and stateless; network errors surface as
    ``requests`` exceptions (``requests.RequestException`` subclasses).
    """

    @staticmethod
    def search_images(query, limit=5):
        """Search Google Images for *query* and return up to *limit* image URLs.

        Parameters
        ----------
        query : str
            Free-text search query (URL-encoding is handled automatically).
        limit : int, optional
            Maximum number of URLs to return (default 5, matching the
            original behavior).

        Returns
        -------
        list[str]
            Absolute ``http(s)`` image URLs scraped from the results page.

        Raises
        ------
        requests.HTTPError
            If Google responds with a non-2xx status.

        Notes
        -----
        Scraping Google HTML is brittle and may break without notice;
        the official Custom Search API is the robust alternative.
        """
        # Let requests build/encode the query string instead of interpolating
        # the raw query into the URL (spaces, '&', '#' etc. would break it).
        response = requests.get(
            "https://www.google.com/search",
            params={"q": query, "tbm": "isch"},
            headers=_HEADERS,
            timeout=_TIMEOUT,
        )
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "html.parser")
        # Keep only absolute http(s) sources; Google inlines many data: URIs.
        urls = [
            img.get("src")
            for img in soup.find_all("img")
            if img.get("src") and img.get("src").startswith("http")
        ]
        return urls[:limit]

    @staticmethod
    def download_image(img_url, save_path):
        """Download *img_url* and write it to *save_path*.

        Streams the body in chunks so large images are not held fully
        in memory. Raises ``requests.HTTPError`` on a non-2xx status
        (previously a 404 page would have been silently saved as the image).
        """
        with requests.get(img_url, headers=_HEADERS, timeout=_TIMEOUT, stream=True) as response:
            response.raise_for_status()
            with open(save_path, "wb") as handler:
                for chunk in response.iter_content(chunk_size=8192):
                    handler.write(chunk)

    @staticmethod
    def scrape_and_summarize_website(url):
        """Fetch *url* and return the concatenated text of its <p> elements.

        Returns an empty string when the page has no paragraphs.
        Raises ``requests.HTTPError`` on a non-2xx status.
        """
        response = requests.get(url, headers=_HEADERS, timeout=_TIMEOUT)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")
        return " ".join(para.get_text() for para in soup.find_all("p"))


# Example usage
if __name__ == "__main__":
    image_urls = WebSearchTools.search_images("random forest")
    for idx, img_url in enumerate(image_urls):
        WebSearchTools.download_image(img_url, f"image_{idx}.png")