# (Hugging Face Space status banner captured during export: "Spaces: Sleeping")
import datetime
from xml.etree import ElementTree

import pytz
import requests
import yaml
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

from Gradio_UI import GradioUI
from tools.final_answer import FinalAnswerTool
def _parse_atom_entry(entry) -> dict:
    """Map one Atom <entry> element to a flat dict of filing metadata."""
    ns = "{http://www.w3.org/2005/Atom}"
    return {
        "title": entry.find(ns + "title").text,
        "link": entry.find(ns + "link").attrib['href'],
        "summary": entry.find(ns + "summary").text,
        "updated": entry.find(ns + "updated").text,
        "category": entry.find(ns + "category").attrib['term'],
        "id": entry.find(ns + "id").text,
    }

def get_lastest_filing_info(company: str = None, cik: str = None, type: str = "10-K", dateb: str = None, start: str = '0', count: str = '50'):
    """A tool that gets the most recent 10-K filings (including addendums) of a company from the SEC.
    Args:
        company: The name of the company.
        cik: The Central Index Key (CIK) of the company.
        type: The type of filing. (Default is "10-K" for this function.)
        dateb: The date before which the filings were made (format: YYYYMMDD).
        start: The starting point for pagination.
        count: The number of filings to retrieve per page.
    Returns:
        A list of filing dicts (title/link/summary/updated/category/id) on
        success, or a dict with an "error" key on failure.
    """
    # Validate dateb format: must parse as YYYYMMDD.
    if dateb:
        try:
            datetime.datetime.strptime(dateb, "%Y%m%d")
        except ValueError as e:
            print(f"Date validation error: {e}")
            return {"error": str(e)}
        # Reject future dates. Plain string comparison is safe here because
        # YYYYMMDD strings sort lexicographically in date order.
        current_date = datetime.datetime.now().strftime("%Y%m%d")
        if dateb > current_date:
            return {"error": "dateb cannot be after the current date."}
    base_url = "https://www.sec.gov/cgi-bin/browse-edgar?action=getcurrent"
    params = {
        "CIK": cik,
        "type": type,
        "company": company,
        "dateb": dateb,
        "owner": "include",
        "start": start,
        "count": count,
        "output": "atom",
    }
    # SEC guidelines require a descriptive User-Agent identifying the caller.
    headers = {
        "User-Agent": "FuncPhenomenon SMOLAgentTest/1.0"
    }
    response = requests.get(base_url, params=params, headers=headers)
    if response.status_code != 200:
        print(f"SEC retrieval error: {response.text}")
        return {"error": response.text}
    filings = []
    ns = "{http://www.w3.org/2005/Atom}"
    try:
        root = ElementTree.fromstring(response.content)
        # Process filings from the first page; keep only "10-K*" categories
        # (this also matches 10-K/A addendums). The 100-filing cap is applied
        # here too, so an oversized `count` cannot overshoot the limit.
        for entry in root.findall(ns + "entry"):
            filing = _parse_atom_entry(entry)
            if filing["category"].startswith("10-K"):
                filings.append(filing)
                if len(filings) >= 100:
                    break
        # Fetch additional pages until we have 100 filings or no more remain.
        # Pagination errors return the partial result rather than an error dict.
        while len(filings) < 100:
            params["start"] = str(int(params["start"]) + int(params["count"]))
            response = requests.get(base_url, params=params, headers=headers)
            if response.status_code != 200:
                print(f"SEC retrieval error on pagination: {response.text}")
                break
            entries = ElementTree.fromstring(response.content).findall(ns + "entry")
            if not entries:
                break
            for entry in entries:
                filing = _parse_atom_entry(entry)
                if filing["category"].startswith("10-K"):
                    filings.append(filing)
                    if len(filings) >= 100:
                        break
    except Exception as e:
        return {"error": f"Failed to parse XML: {str(e)}"}
    return filings
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the zone name, then format the zone-local "now".
        zone = pytz.timezone(timezone)
        local_time = datetime.datetime.now(zone).strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        # Invalid zone names (and any other failure) come back as a message
        # string rather than an exception, so the agent can read the error.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
    return f"The current local time in {timezone} is: {local_time}"
# Tool the agent must call to deliver its final response to the user.
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)

# Import tool from Hub.
# NOTE(review): image_generation_tool is loaded but never added to the
# agent's `tools` list below — confirm whether that is intentional.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# The agent's prompt/system templates live in a separate YAML file.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[get_lastest_filing_info, get_current_time_in_timezone, final_answer],  ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

# Launch a Gradio chat UI wrapping the agent (blocks until the app stops).
GradioUI(agent).launch()