import os
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

import requests

from crewai_tools import TavilySearchTool, WebsiteSearchTool
from crewai_tools.tools import BaseTool
class BitcoinNewsTool(BaseTool):
    """Fetch recent Bitcoin / cryptocurrency news articles.

    Uses the Tavily search tool as the primary source when a
    ``TAVILY_API_KEY`` is configured, then backfills with a generic web
    search if fewer than ``limit`` articles were collected.
    """

    name: str = "Bitcoin News Tool"
    description: str = "Fetches latest news articles about Bitcoin and cryptocurrency market"

    def __init__(self):
        super().__init__()
        # NOTE(review): crewai's BaseTool is typically a pydantic model;
        # confirm its config allows these ad-hoc instance attributes.
        self.tavily_api_key = os.getenv("TAVILY_API_KEY")
        self.tavily_search = TavilySearchTool()
        self.web_search_tool = WebsiteSearchTool()

    def _run(self, days_back: int = 3, limit: int = 10) -> Dict[str, Any]:
        """Fetch news about Bitcoin from various sources.

        Args:
            days_back: Number of days to look back for news.
            limit: Maximum number of articles to return.

        Returns:
            Dict with ``articles`` (list of article dicts), ``count``,
            ``period`` and ``timestamp`` keys.
        """
        results: List[Dict[str, Any]] = []

        # Primary source: Tavily (only attempted when an API key is set).
        if self.tavily_api_key:
            try:
                query = f"latest bitcoin cryptocurrency news analysis last {days_back} days"
                raw = self.tavily_search._run(query=query, max_results=limit)
                if isinstance(raw, str):
                    results.extend(self._parse_text_results(raw, limit - len(results)))
                elif isinstance(raw, list):
                    results.extend(self._parse_structured_results(raw, limit - len(results)))
            except Exception as e:
                # Best-effort source: log and fall through to the backfill.
                print(f"Error fetching news from Tavily: {e}")

        # Backfill with a generic web search when Tavily was missing or short.
        if len(results) < limit:
            try:
                raw = self.web_search_tool._run(
                    query=f"latest bitcoin news crypto market analysis last {days_back} days"
                )
                if isinstance(raw, str) and raw:
                    results.extend(self._parse_web_lines(raw, limit - len(results)))
            except Exception as e:
                print(f"Error fetching news from web search: {e}")

        return {
            "articles": results,
            "count": len(results),
            "period": f"Last {days_back} days",
            "timestamp": datetime.now().isoformat(),
        }

    @staticmethod
    def _parse_text_results(text: str, remaining: int) -> List[Dict[str, Any]]:
        """Parse a plain-text Tavily response into article dicts.

        Items are separated by blank lines; the first line of an item is
        treated as its title and any line starting with ``http`` as its URL.
        Empty items are skipped.
        """
        articles: List[Dict[str, Any]] = []
        for item in text.split("\n\n"):
            if len(articles) >= remaining:
                break
            if not item.strip():
                continue  # skip blank fragments produced by the split
            lines = item.split("\n")
            title = lines[0] if lines else "No title available"
            content = " ".join(lines[1:]) if len(lines) > 1 else title
            url = next((ln.strip() for ln in lines if ln.startswith("http")), None)
            articles.append({
                "title": title,
                "description": content,
                "source": "Tavily Search",
                "url": url,
                "published_at": datetime.now().isoformat(),
                "content_preview": content,
            })
        return articles

    @staticmethod
    def _parse_structured_results(items: List[Any], remaining: int) -> List[Dict[str, Any]]:
        """Normalize structured (JSON-style) Tavily results into article dicts."""
        articles: List[Dict[str, Any]] = []
        for item in items:
            if len(articles) >= remaining:
                break
            if not isinstance(item, dict):
                continue  # ignore malformed entries
            body = item.get("content", item.get("snippet", ""))
            articles.append({
                "title": item.get("title", "No title available"),
                "description": body,
                "source": item.get("source", "Tavily Search"),
                "url": item.get("url"),
                "published_at": datetime.now().isoformat(),
                "content_preview": body,
            })
        return articles

    @staticmethod
    def _parse_web_lines(text: str, remaining: int) -> List[Dict[str, Any]]:
        """Parse generic web-search output into article dicts.

        Each non-empty line is treated as one candidate item, optionally in
        ``"title - source"`` form. URLs and dates are not recoverable from
        this plain-text output, so those fields are ``None``.
        """
        articles: List[Dict[str, Any]] = []
        for raw_line in text.split("\n"):
            if len(articles) >= remaining:
                break
            line = raw_line.strip()
            if not line:
                continue
            title, _, source = line.partition(" - ")
            articles.append({
                "title": title,
                "description": line,
                "source": source or "Web Search",
                "url": None,           # not available from simple parsing
                "published_at": None,  # not available from simple parsing
                "content_preview": line,
            })
        return articles
class BitcoinSentimentTool(BaseTool):
    """Estimate Bitcoin market sentiment from recent news headlines.

    Pulls articles via :class:`BitcoinNewsTool` and scores them with a
    simple keyword-count heuristic (a placeholder for a real sentiment
    analysis API).
    """

    name: str = "Bitcoin Sentiment Analysis Tool"
    description: str = "Analyzes sentiment from recent Bitcoin news and social media data"

    def __init__(self):
        super().__init__()
        self.news_tool = BitcoinNewsTool()

    def _run(self, days_back: int = 3) -> Dict[str, Any]:
        """Analyze sentiment from Bitcoin news.

        Args:
            days_back: Number of days to look back for analysis.

        Returns:
            Dict with ``sentiment_score`` (-1..1), ``sentiment`` category,
            ``confidence`` (0..100), mention counts and metadata — or an
            ``error`` payload when no articles were found.
        """
        news_data = self.news_tool._run(days_back=days_back, limit=15)
        articles = news_data.get("articles") or []
        if not articles:
            return {
                "error": "No news articles found for analysis",
                "sentiment_score": 0,
                "sentiment": "neutral",
                "confidence": 0,
            }

        # Build the analysis corpus from non-empty titles and descriptions.
        texts: List[str] = []
        for article in articles:
            for field in ("title", "description"):
                value = article.get(field, "")
                if value:
                    texts.append(value)

        positive_count, negative_count = self._count_keyword_mentions(texts)

        # Score in [-1, 1]: fraction of net-positive keyword hits.
        total = positive_count + negative_count
        sentiment_score = 0 if total == 0 else (positive_count - negative_count) / total

        if sentiment_score > 0.2:
            sentiment = "positive"
        elif sentiment_score < -0.2:
            sentiment = "negative"
        else:
            sentiment = "neutral"

        # More keyword hits relative to corpus size => higher confidence (0-100).
        confidence = min(total / max(1, len(texts)), 1.0) * 100

        return {
            "sentiment_score": sentiment_score,
            "sentiment": sentiment,
            "confidence": confidence,
            "positive_mentions": positive_count,
            "negative_mentions": negative_count,
            "total_articles": len(articles),
            "period": f"Last {days_back} days",
            "timestamp": datetime.now().isoformat(),
        }

    @staticmethod
    def _count_keyword_mentions(texts: List[str]) -> tuple:
        """Count positive/negative keyword hits across all texts.

        Case-insensitive substring matching; each keyword counts at most
        once per text. NOTE(review): substring matching also hits keywords
        embedded inside longer words (e.g. "gain" in "against") — acceptable
        for this heuristic, but worth confirming before relying on scores.
        """
        # Keyword lists are kept lowercase so no per-comparison lowering is needed.
        positive_keywords = (
            "surge", "rally", "bullish", "growth", "positive", "gain", "gains", "uptrend",
            "soar", "adoption", "breakthrough", "opportunity", "profit", "boost", "climb",
            "optimistic", "confidence", "successful", "legalization", "mainstream",
            "institutional", "hodl", "to the moon", "recovery",
        )
        negative_keywords = (
            "crash", "fall", "bearish", "drop", "negative", "loss", "losses", "downtrend",
            "plummet", "ban", "regulation", "risk", "caution", "warning", "decline",
            "pessimistic", "fear", "sell-off", "correction", "bubble", "volatility",
            "scandal", "hack", "fraud", "manipulation", "uncertainty",
        )
        positive_count = 0
        negative_count = 0
        for text in texts:
            lowered = text.lower()
            positive_count += sum(1 for kw in positive_keywords if kw in lowered)
            negative_count += sum(1 for kw in negative_keywords if kw in lowered)
        return positive_count, negative_count