| | """ |
| | Bitcoin analysis tools for CrewAI agents - Simplified Version |
| | """ |
| |
|
| | import os |
| | from typing import Dict, Any, List, Optional, ClassVar |
| | from datetime import datetime, timedelta |
| | import pandas as pd |
| | import yfinance as yf |
| | from crewai.tools import BaseTool |
| | from pydantic import Field |
| | import requests |
| | from bs4 import BeautifulSoup |
| | import time |
| | import random |
| | import json |
| |
|
# NOTE(review): the original unconditionally overwrote any real SERPER_API_KEY
# already present in the environment with this placeholder. setdefault only
# fills the value in when missing, so a genuine key supplied via the
# environment survives. Hard-coding secrets in source is a smell — prefer
# injecting the key through the environment or a secrets manager.
os.environ.setdefault("SERPER_API_KEY", "your_serper_api_key")
| |
|
class YahooBitcoinDataTool(BaseTool):
    """Tool for fetching Bitcoin data from Yahoo Finance"""
    name: str = "Bitcoin Price Data Tool"
    description: str = "Get the latest Bitcoin price data from Yahoo Finance"

    def _run(self) -> Dict[str, Any]:
        """
        Fetch latest Bitcoin data from Yahoo Finance.

        Returns:
            Dictionary with:
                price (float): latest close price in USD (0 on error)
                market_cap: 'marketCap' from Yahoo's info dict (0 if unavailable)
                percent_change (float): close-over-previous-close change, in percent
                trend (str): "bullish" if percent_change > 0, otherwise "bearish"
                    ("unknown" when no data / on error)
                error (str): present only when the fetch failed
        """
        try:
            btc_data = yf.Ticker("BTC-USD")
            # BUG FIX: the original requested period="1d", which yields a single
            # row, so the previous close was never available and percent_change
            # was always 0 (and trend always "bearish"). Requesting two days
            # makes the day-over-day change actually computable.
            history = btc_data.history(period="2d")

            if history.empty:
                return {
                    "error": "No data available",
                    "price": 0,
                    "market_cap": 0,
                    "percent_change": 0,
                    "trend": "unknown"
                }

            # Cast the numpy scalar to a plain float for clean JSON serialization.
            latest_price = float(history['Close'].iloc[-1])

            # .info triggers an extra network request; marketCap may be absent,
            # in which case we report 0.
            info = btc_data.info
            market_cap = info.get('marketCap', 0)

            # Day-over-day change; guarded in case only one row came back
            # (e.g. around market-data gaps).
            if len(history) > 1:
                prev_close = float(history['Close'].iloc[-2])
                percent_change = ((latest_price - prev_close) / prev_close) * 100
            else:
                percent_change = 0

            trend = "bullish" if percent_change > 0 else "bearish"

            return {
                "price": round(latest_price, 2),
                "market_cap": market_cap,
                "percent_change": round(percent_change, 2),
                "trend": trend
            }

        except Exception as e:
            # Best-effort tool: report the failure in-band instead of raising,
            # so a transient network error does not abort the agent run.
            return {
                "error": str(e),
                "price": 0,
                "market_cap": 0,
                "percent_change": 0,
                "trend": "unknown"
            }
| |
|
class RealBitcoinNewsTool(BaseTool):
    """Tool for fetching actual Bitcoin news from the web using direct HTTP requests.

    Scrapes a small set of crypto-news listing pages with requests +
    BeautifulSoup, falling back to Google News and finally to canned
    placeholder articles when nothing could be fetched live.
    """
    name: str = "Bitcoin News Tool"
    description: str = "Fetches the latest Bitcoin news and analysis from financial news sources"

    # Rotated per request (random.choice) so the scraper presents as an
    # ordinary browser rather than a default requests User-Agent.
    USER_AGENTS: ClassVar[List[str]] = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'
    ]

    # Source key -> listing-page URL. The keys double as the `source`
    # argument accepted by _run and select the parser in _fetch_from_source.
    NEWS_SOURCES: ClassVar[Dict[str, str]] = {
        'coindesk': 'https://www.coindesk.com/tag/bitcoin/',
        'cointelegraph': 'https://cointelegraph.com/tags/bitcoin',
        'decrypt': 'https://decrypt.co/categories/bitcoin',
        'bitcoinmagazine': 'https://bitcoinmagazine.com/',
        'google_news': 'https://news.google.com/search?q=bitcoin&hl=en-US'
    }

    def _run(self, source: Optional[str] = None, count: int = 5) -> Dict[str, Any]:
        """
        Fetch Bitcoin news directly from selected news sources

        Args:
            source: Optional specific source to check (e.g., "coindesk", "cointelegraph", "google_news")
            count: Maximum number of articles to retrieve

        Returns:
            Dictionary with Bitcoin news articles: keys "articles", "count",
            "period", and "timestamp". Falls back to canned sample articles
            (see _get_fallback_data) when no live article could be fetched.
        """
        articles = []

        try:
            # An unknown or missing `source` falls back to trying every
            # configured source in declaration order.
            sources_to_check = {source: self.NEWS_SOURCES[source]} if source and source in self.NEWS_SOURCES else self.NEWS_SOURCES

            # Only the first 3 sources are attempted, to bound total latency.
            for src_name, url in list(sources_to_check.items())[:3]:
                if len(articles) >= count:
                    break

                try:
                    # Ask each source only for the still-missing number of articles.
                    source_articles = self._fetch_from_source(src_name, url, count - len(articles))
                    articles.extend(source_articles)

                    # Polite delay between sources to avoid hammering the sites.
                    time.sleep(1)

                except Exception as e:
                    # A failing source is logged and skipped, never fatal.
                    print(f"Error fetching from {src_name}: {e}")

            # Last live attempt: Google News aggregates many outlets, so try it
            # explicitly if the direct sources yielded nothing.
            if not articles and 'google_news' in self.NEWS_SOURCES:
                try:
                    google_articles = self._fetch_from_source('google_news', self.NEWS_SOURCES['google_news'], count)
                    articles.extend(google_articles)
                except Exception as e:
                    print(f"Error fetching from Google News: {e}")

        except Exception as e:
            print(f"General error fetching news: {e}")

        # Nothing fetched live at all -> serve the canned placeholder articles.
        if not articles:
            return self._get_fallback_data()

        return {
            "articles": articles,
            "count": len(articles),
            "period": "Latest available data",
            "timestamp": datetime.now().isoformat()
        }

    def _fetch_from_source(self, source_name: str, url: str, count: int) -> List[Dict[str, Any]]:
        """Extract articles from a specific news source.

        Downloads the listing page, dispatches to the per-source parser by
        `source_name`, back-fills missing 'source'/'published_at' fields, and
        returns at most `count` article dicts (empty list on any error).
        """
        articles = []
        headers = {'User-Agent': random.choice(self.USER_AGENTS)}

        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # Dispatch on the configured source key; unknown keys yield [].
            if source_name == 'coindesk':
                articles = self._parse_coindesk(soup, count)
            elif source_name == 'cointelegraph':
                articles = self._parse_cointelegraph(soup, count)
            elif source_name == 'decrypt':
                articles = self._parse_decrypt(soup, count)
            elif source_name == 'bitcoinmagazine':
                articles = self._parse_bitcoinmagazine(soup, count)
            elif source_name == 'google_news':
                articles = self._parse_google_news(soup, count)

            # Normalize: every article carries a source label and a timestamp,
            # even when the parser could not extract them from the page.
            for article in articles:
                if 'source' not in article or not article['source']:
                    article['source'] = source_name.title()
                if 'published_at' not in article or not article['published_at']:
                    article['published_at'] = datetime.now().isoformat()

            return articles[:count]

        except Exception as e:
            print(f"Error in _fetch_from_source for {source_name}: {e}")
            return []

    def _parse_coindesk(self, soup: BeautifulSoup, count: int) -> List[Dict[str, Any]]:
        """Parse CoinDesk articles.

        Selector fallbacks accommodate markup variations on the listing page;
        site redesigns may silently break them (then [] is returned upstream).
        """
        articles = []
        try:
            article_elements = soup.select('article') or soup.select('.article-card')

            for element in article_elements[:count]:
                title_elem = element.select_one('h2, h3, .heading') or element
                link_elem = element.select_one('a[href]')
                desc_elem = element.select_one('p, .description') or title_elem

                title = title_elem.get_text().strip() if title_elem else "Bitcoin News"
                description = desc_elem.get_text().strip() if desc_elem else ""
                url = link_elem.get('href') if link_elem else None

                # Listing pages use site-relative links; make them absolute.
                if url and not url.startswith('http'):
                    url = f"https://www.coindesk.com{url}"

                if title:
                    articles.append({
                        'title': title,
                        'description': description or "Recent Bitcoin news from CoinDesk",
                        'source': 'CoinDesk',
                        'url': url,
                        'published_at': datetime.now().isoformat()
                    })
        except Exception as e:
            print(f"Error parsing CoinDesk: {e}")

        return articles

    def _parse_cointelegraph(self, soup: BeautifulSoup, count: int) -> List[Dict[str, Any]]:
        """Parse CoinTelegraph articles (same shape/fallback strategy as CoinDesk)."""
        articles = []
        try:
            article_elements = soup.select('.post-card') or soup.select('article')

            for element in article_elements[:count]:
                title_elem = element.select_one('h2') or element.select_one('.post-card__title')
                link_elem = element.select_one('a[href]')
                desc_elem = element.select_one('p') or element.select_one('.post-card__text')

                title = title_elem.get_text().strip() if title_elem else "Bitcoin News"
                description = desc_elem.get_text().strip() if desc_elem else ""
                url = link_elem.get('href') if link_elem else None

                # Listing pages use site-relative links; make them absolute.
                if url and not url.startswith('http'):
                    url = f"https://cointelegraph.com{url}"

                if title:
                    articles.append({
                        'title': title,
                        'description': description or "Recent Bitcoin news from CoinTelegraph",
                        'source': 'CoinTelegraph',
                        'url': url,
                        'published_at': datetime.now().isoformat()
                    })
        except Exception as e:
            print(f"Error parsing CoinTelegraph: {e}")

        return articles

    def _parse_decrypt(self, soup: BeautifulSoup, count: int) -> List[Dict[str, Any]]:
        """Parse Decrypt articles.

        NOTE(review): unlike the CoinDesk/CoinTelegraph parsers, relative URLs
        are NOT absolutized here — confirm whether Decrypt links are absolute.
        """
        articles = []
        try:
            article_elements = soup.select('.card') or soup.select('article')

            for element in article_elements[:count]:
                title_elem = element.select_one('h3') or element.select_one('.title')
                link_elem = element.select_one('a[href]')
                desc_elem = element.select_one('p') or element.select_one('.excerpt')

                title = title_elem.get_text().strip() if title_elem else "Bitcoin News"
                description = desc_elem.get_text().strip() if desc_elem else ""
                url = link_elem.get('href') if link_elem else None

                if title:
                    articles.append({
                        'title': title,
                        'description': description or "Recent Bitcoin news from Decrypt",
                        'source': 'Decrypt',
                        'url': url,
                        'published_at': datetime.now().isoformat()
                    })
        except Exception as e:
            print(f"Error parsing Decrypt: {e}")

        return articles

    def _parse_bitcoinmagazine(self, soup: BeautifulSoup, count: int) -> List[Dict[str, Any]]:
        """Parse BitcoinMagazine articles.

        NOTE(review): relative URLs are not absolutized here either — verify
        against the live site markup.
        """
        articles = []
        try:
            article_elements = soup.select('.article') or soup.select('article') or soup.select('.post')

            for element in article_elements[:count]:
                title_elem = element.select_one('h2, h3') or element.select_one('.title')
                link_elem = element.select_one('a[href]')
                desc_elem = element.select_one('p') or element.select_one('.excerpt, .summary')

                title = title_elem.get_text().strip() if title_elem else "Bitcoin News"
                description = desc_elem.get_text().strip() if desc_elem else ""
                url = link_elem.get('href') if link_elem else None

                if title:
                    articles.append({
                        'title': title,
                        'description': description or "Recent Bitcoin news from Bitcoin Magazine",
                        'source': 'Bitcoin Magazine',
                        'url': url,
                        'published_at': datetime.now().isoformat()
                    })
        except Exception as e:
            print(f"Error parsing Bitcoin Magazine: {e}")

        return articles

    def _parse_google_news(self, soup: BeautifulSoup, count: int) -> List[Dict[str, Any]]:
        """Parse Google News search results.

        Extracts the originating outlet name when available; the class-name
        selectors (.xrnccd, .DY5T1d, ...) are Google's generated CSS classes
        and are brittle across Google UI revisions.
        """
        articles = []
        try:
            article_elements = soup.select('article') or soup.select('.xrnccd')

            for element in article_elements[:count]:
                title_elem = element.select_one('h3, h4') or element.select_one('.DY5T1d')
                source_elem = element.select_one('.wEwyrc') or element.select_one('.SVJrMe')
                time_elem = element.select_one('time') or element.select_one('.WW6dff')
                link_elem = element.select_one('a[href]')

                title = title_elem.get_text().strip() if title_elem else "Bitcoin News"
                source = source_elem.get_text().strip() if source_elem else "Google News"

                # Google News uses './articles/...' relative hrefs; rewrite them
                # into absolute news.google.com URLs (dropping the leading '.').
                url = None
                if link_elem:
                    url = link_elem.get('href')
                    if url and url.startswith('./articles/'):
                        url = f"https://news.google.com{url[1:]}"

                if title:
                    articles.append({
                        'title': title,
                        'description': f"Bitcoin news from {source}",
                        'source': source,
                        'url': url,
                        'published_at': datetime.now().isoformat()
                    })
        except Exception as e:
            print(f"Error parsing Google News: {e}")

        return articles

    def _get_fallback_data(self) -> Dict[str, Any]:
        """Return fallback data if real-time news couldn't be fetched.

        The articles are static placeholders (example.com URLs) stamped with
        the current time, shaped exactly like the live result so downstream
        agents can consume either form.
        """
        current_time = datetime.now().isoformat()

        return {
            "articles": [
                {
                    'title': "Institutional Interest in Bitcoin Continues to Grow",
                    'description': "Major financial institutions are increasingly investing in Bitcoin as a hedge against inflation and economic uncertainty. Recent regulatory clarity has provided a more secure environment for institutional adoption.",
                    'source': "Financial Trends",
                    'url': "https://example.com/bitcoin-institutional-interest",
                    'published_at': current_time,
                },
                {
                    'title': "Bitcoin Mining Difficulty Reaches All-Time High",
                    'description': "Bitcoin mining difficulty has adjusted upward by 5.8% this week, reaching a new all-time high. This increased difficulty reflects growing hash power on the network and continues to ensure the security of the blockchain.",
                    'source': "Crypto Analytics",
                    'url': "https://example.com/bitcoin-mining-difficulty",
                    'published_at': current_time,
                },
                {
                    'title': "El Salvador's Bitcoin Treasury Surpasses $100M in Profit",
                    'description': "The government of El Salvador, which adopted Bitcoin as legal tender in 2021, has reported that its Bitcoin holdings have surpassed $100 million in unrealized profit as the cryptocurrency continues its upward trend.",
                    'source': "Global Crypto News",
                    'url': "https://example.com/el-salvador-bitcoin-profit",
                    'published_at': current_time,
                },
                {
                    'title': "Analysis: Bitcoin Network Health Metrics at All-Time High",
                    'description': "Key Bitcoin network health metrics including hash rate, active addresses, and transaction value are all showing positive growth, suggesting robust long-term fundamentals despite short-term price volatility.",
                    'source': "Crypto Research Firm",
                    'url': "https://example.com/bitcoin-network-health",
                    'published_at': current_time,
                },
                {
                    'title': "Regulatory Developments Could Impact Bitcoin's Institutional Adoption",
                    'description': "Upcoming regulatory decisions in major markets could significantly impact Bitcoin's institutional adoption trajectory, with experts suggesting clarity could unleash a new wave of investment from traditional finance.",
                    'source': "Regulatory Watch",
                    'url': "https://example.com/bitcoin-regulatory-impact",
                    'published_at': current_time,
                }
            ],
            "count": 5,
            "period": "Last few days (fallback data)",
            "timestamp": current_time
        }