# Hugging Face Space: Financial AI Analysis Platform (Gradio app)
import json
import os
import time
from typing import Dict, Optional

import gradio as gr
import requests
class FinancialAI:
    """Client for financial sentiment analysis and free-form AI analysis.

    Wraps two Hugging Face Inference API sentiment models (FinBERT and a
    financial-news DistilRoBERTa) and, for free-form analysis, falls back
    through Groq -> OpenAI -> huggingface_hub chat completion.

    Every public method returns a ``(result, error)`` tuple in which
    exactly one element is ``None``.
    """

    def __init__(self, api_token: str):
        # Token is sent as a Bearer credential on every HF request.
        self.api_token = api_token
        self.headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }
        # Only models known to be served by the hosted Inference API.
        # Large chat models (mistral/llama) were removed because the free
        # Inference API endpoint does not host them.
        self.models = {
            "sentiment": "mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis",
            "finbert": "ProsusAI/finbert"
        }
        self.endpoints = {
            model_type: f"https://api-inference.huggingface.co/models/{model_name}"
            for model_type, model_name in self.models.items()
        }

    def _make_request(self, endpoint: str, payload: dict, max_retries: int = 3) -> tuple:
        """POST ``payload`` to ``endpoint``, retrying transient failures.

        Returns:
            (json_result, None) on success, or (None, error_message) on
            any unrecoverable failure.
        """
        for attempt in range(max_retries):
            try:
                response = requests.post(endpoint, headers=self.headers, json=payload, timeout=30)
                if response.status_code == 200:
                    return response.json(), None
                elif response.status_code == 503:
                    # 503 = model is cold-loading; HF may report how long.
                    error_data = response.json()
                    if "estimated_time" in error_data:
                        wait_time = min(error_data["estimated_time"], 60)
                        return None, f"Model loading, estimated wait: {wait_time}s"
                    time.sleep(20)
                    continue
                elif response.status_code == 401:
                    return None, "β Invalid API token"
                elif response.status_code == 403:
                    return None, "β Access denied"
                elif response.status_code == 429:
                    return None, "β Rate limit exceeded"
                else:
                    return None, f"β HTTP {response.status_code}: {response.text[:200]}"
            except requests.exceptions.Timeout:
                return None, "β Request timeout"
            except requests.exceptions.ConnectionError:
                return None, "β Connection error"
            except Exception as e:
                # Unexpected failure: back off and retry; surface the
                # message only on the final attempt.
                if attempt < max_retries - 1:
                    time.sleep(5)
                    continue
                return None, f"β Error: {str(e)}"
        return None, "β Max retries exceeded"

    @staticmethod
    def _format_top_prediction(result, uppercase: bool):
        """Extract the highest-scoring label from an HF classification response.

        Returns a display string, or ``None`` when the response shape is
        not the expected ``[[{"label": ..., "score": ...}, ...]]``.
        """
        if isinstance(result, list) and len(result) > 0:
            predictions = result[0]
            if isinstance(predictions, list):
                top = max(predictions, key=lambda x: x['score'])
                label = top['label'].upper() if uppercase else top['label']
                return f"{label} (confidence: {top['score']:.2f})"
            # Unrecognised inner shape: show it verbatim rather than fail.
            return str(predictions)
        return None

    def sentiment_analysis(self, text: str) -> tuple:
        """Financial sentiment analysis using FinBERT."""
        result, error = self._make_request(self.endpoints["finbert"], {"inputs": text})
        if result:
            formatted = self._format_top_prediction(result, uppercase=True)
            if formatted is not None:
                return formatted, None
        return None, error or "No result returned"

    def financial_news_sentiment(self, text: str) -> tuple:
        """Financial news sentiment using the specialized DistilRoBERTa model."""
        result, error = self._make_request(self.endpoints["sentiment"], {"inputs": text})
        if result:
            formatted = self._format_top_prediction(result, uppercase=False)
            if formatted is not None:
                return formatted, None
        return None, error or "No result returned"

    def generate_analysis(self, query: str) -> tuple:
        """Generate financial analysis with the first available LLM backend.

        Tries, in order: Groq (GROQ_API_KEY), OpenAI (OPENAI_API_KEY),
        then huggingface_hub chat completion with this client's token.
        Each backend failure deliberately falls through to the next; if
        all fail, an instructional error message is returned.
        """
        # Option 1: Groq (fast and often free)
        groq_key = os.getenv("GROQ_API_KEY")
        if groq_key:
            try:
                payload = {
                    "model": "llama3-8b-8192",
                    "messages": [
                        {
                            "role": "system",
                            "content": "You are an expert financial analyst. Provide concise, actionable investment analysis and recommendations."
                        },
                        {
                            "role": "user",
                            "content": f"Analyze this financial information: {query}. Provide investment insights, market sentiment, and actionable recommendations."
                        }
                    ],
                    "max_tokens": 250,
                    "temperature": 0.7
                }
                response = requests.post(
                    "https://api.groq.com/openai/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {groq_key}",
                        "Content-Type": "application/json"
                    },
                    json=payload,
                    timeout=30
                )
                if response.status_code == 200:
                    ai_response = response.json()['choices'][0]['message']['content'].strip()
                    return f"π€ AI Analysis:\n{ai_response}", None
            except Exception:
                pass  # Best-effort: fall through to next option
        # Option 2: OpenAI
        openai_key = os.getenv("OPENAI_API_KEY")
        if openai_key:
            try:
                payload = {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {
                            "role": "system",
                            "content": "You are a financial analyst. Provide brief, actionable investment analysis."
                        },
                        {
                            "role": "user",
                            "content": f"Analyze: {query}"
                        }
                    ],
                    "max_tokens": 200,
                    "temperature": 0.7
                }
                response = requests.post(
                    "https://api.openai.com/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {openai_key}",
                        "Content-Type": "application/json"
                    },
                    json=payload,
                    timeout=30
                )
                if response.status_code == 200:
                    ai_response = response.json()['choices'][0]['message']['content'].strip()
                    return f"π€ AI Analysis:\n{ai_response}", None
            except Exception:
                pass  # Best-effort: fall through to next option
        # Option 3: Hugging Face Inference Providers
        try:
            from huggingface_hub import InferenceClient
            client = InferenceClient(token=self.api_token)
            messages = [
                {
                    "role": "system",
                    "content": "You are a financial analyst. Provide brief investment analysis."
                },
                {
                    "role": "user",
                    "content": f"Analyze: {query}"
                }
            ]
            response = client.chat_completion(
                messages=messages,
                model="microsoft/DialoGPT-medium",
                max_tokens=200,
                temperature=0.7
            )
            if response and response.choices:
                ai_response = response.choices[0].message.content.strip()
                return f"π€ AI Analysis:\n{ai_response}", None
        except Exception:
            pass  # Best-effort: fall through to the final error message
        # All AI options failed: return error with setup instructions.
        return None, ("β No AI models available. To get real AI analysis, add API keys:\n"
                      "β’ GROQ_API_KEY (free at groq.com)\n"
                      "β’ OPENAI_API_KEY (at openai.com)\n"
                      "β’ Or use Hugging Face Inference Providers")
# Get API token - try multiple possible secret names and methods
def get_hf_token():
    """Return the first usable Hugging Face token found, else ``None``.

    Checks a list of common environment-variable names (skipping the
    placeholder value), then falls back to huggingface_hub's local
    token cache when that package is installed.
    """
    # Method 1: Environment variables, in priority order.
    token_options = [
        "HUGGINGFACE_API_TOKEN", "HF_TOKEN", "API_TOKEN", "TOKEN",
        "HUGGINGFACE_TOKEN", "HF_API_TOKEN"
    ]
    for var_name in token_options:
        token = os.getenv(var_name)
        if token and token != "your_huggingface_token_here":
            return token
    # Method 2: huggingface_hub's on-disk token (optional dependency).
    try:
        from huggingface_hub import HfFolder
        token = HfFolder.get_token()
        if token:
            return token
    except Exception:
        # Package missing or cache unreadable — treat as "no token".
        pass
    return None


API_TOKEN = get_hf_token() or "your_huggingface_token_here"
def test_token():
    """Validate API_TOKEN locally, then against the HF whoami endpoint.

    Returns a human-readable status string; callers detect success by
    the "API token is valid" message.
    """
    # Log only a short, non-reusable preview of the token.  NOTE: the
    # previous version also printed every environment-variable NAME,
    # which leaks the names of configured secrets into shared Space
    # logs — removed.
    token_preview = API_TOKEN[:8] + "..." + API_TOKEN[-4:] if len(API_TOKEN) > 12 else "TOO_SHORT"
    print(f"π Debug - Token preview: {token_preview}")
    print(f"π Debug - Token length: {len(API_TOKEN)}")
    if API_TOKEN == "your_huggingface_token_here":
        return "β Please set your Hugging Face API token! Check Spaces secrets."
    if not API_TOKEN.startswith('hf_'):
        return f"β Token should start with 'hf_' but starts with: {API_TOKEN[:3]}..."
    if len(API_TOKEN) < 30:
        return f"β Token appears too short: {len(API_TOKEN)} characters"
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    try:
        # whoami-v2 is the current identity endpoint.
        response = requests.get("https://huggingface.co/api/whoami-v2", headers=headers, timeout=10)
        if response.status_code == 200:
            user_data = response.json()
            username = user_data.get('name', 'Unknown')
            return f"β API token is valid! User: {username}"
        elif response.status_code == 401:
            return "β Token is invalid. Check your Spaces secret configuration."
        else:
            return f"β Token validation failed: HTTP {response.status_code} - {response.text[:100]}"
    except Exception as e:
        return f"β Token test error: {str(e)}"
# Module-level client shared by all requests; built once at startup with
# whatever token get_hf_token() found.
financial_ai = FinancialAI(API_TOKEN)
def analyze_query(query, analysis_type):
    """Dispatch a user query to the selected analysis backend.

    Returns a display string for the UI; never raises (all errors are
    reported inline in the returned text).
    """
    if not query.strip():
        return "Please enter a query."
    # Bail out early unless the token checks out.  The success message
    # is matched by prefix: the previous substring check matched the
    # marker character present in EVERY status string (including the
    # success one), so a valid token also short-circuited and analysis
    # never ran.
    token_status = test_token()
    if not token_status.startswith("β API token is valid"):
        return token_status
    try:
        if analysis_type == "FinBERT Sentiment":
            result, error = financial_ai.sentiment_analysis(query)
            return result if result else f"β FinBERT Sentiment failed: {error}"
        elif analysis_type == "Financial News Sentiment":
            result, error = financial_ai.financial_news_sentiment(query)
            return result if result else f"β Financial News Sentiment failed: {error}"
        elif analysis_type == "AI Analysis":
            result, error = financial_ai.generate_analysis(query)
            return result if result else f"β AI Analysis failed: {error}"
        else:  # Comprehensive: run all three and concatenate sections.
            output = "π COMPREHENSIVE FINANCIAL ANALYSIS\n\n"
            sentiment1, s1_error = financial_ai.sentiment_analysis(query)
            if sentiment1:
                output += f"π FinBERT Sentiment: {sentiment1}\n\n"
            else:
                output += f"π FinBERT Sentiment: Failed - {s1_error}\n\n"
            sentiment2, s2_error = financial_ai.financial_news_sentiment(query)
            if sentiment2:
                output += f"π° Financial News Sentiment: {sentiment2}\n\n"
            else:
                output += f"π° Financial News Sentiment: Failed - {s2_error}\n\n"
            analysis, a_error = financial_ai.generate_analysis(query)
            if analysis:
                # generate_analysis already prefixes its own
                # "AI Analysis" header line — don't prepend it twice.
                output += f"{analysis}\n\n"
            else:
                output += f"π€ AI Analysis: Failed - {a_error}\n\n"
            return output
    except Exception as e:
        return f"β Unexpected error: {str(e)}"
# Create Gradio interface.
# Layout: a token-status banner, then a two-column row (inputs on the
# left, results on the right), clickable examples, and a model notes
# footer.  Statement order inside the `with` blocks defines the layout.
with gr.Blocks(title="Financial AI Analysis Platform") as demo:
    gr.Markdown("# π¦ Financial AI Analysis Platform")
    gr.Markdown("Get AI-powered financial insights using working Hugging Face models for sentiment analysis and market insights.")
    # Token status display: evaluated once at startup, read-only after.
    with gr.Row():
        token_status = gr.Textbox(
            label="π Token Status",
            value=test_token(),
            interactive=False
        )
    with gr.Row():
        with gr.Column():
            # Left column: query text, analysis mode, and the trigger button.
            query_input = gr.Textbox(
                label="π Enter your financial query or news",
                placeholder="e.g., 'Apple stock is performing well' or 'Tesla reports strong earnings'",
                lines=3
            )
            analysis_type = gr.Dropdown(
                choices=["FinBERT Sentiment", "Financial News Sentiment", "AI Analysis", "Comprehensive"],
                label="π Analysis Type",
                value="FinBERT Sentiment"
            )
            analyze_btn = gr.Button("π Analyze", variant="primary")
        with gr.Column():
            # Right column: read-only results pane.
            output = gr.Textbox(
                label="π‘ Analysis Results",
                lines=12,
                interactive=False
            )
    # Wire the button to the dispatcher defined above.
    analyze_btn.click(
        fn=analyze_query,
        inputs=[query_input, analysis_type],
        outputs=output
    )
    # One clickable example per analysis mode.
    gr.Examples(
        examples=[
            ["Apple stock is performing exceptionally well this quarter", "FinBERT Sentiment"],
            ["Tesla reports disappointing earnings, stock falls", "Financial News Sentiment"],
            ["Microsoft investment potential for 2025", "AI Analysis"],
            ["Amazon announces major expansion plans", "Comprehensive"]
        ],
        inputs=[query_input, analysis_type]
    )
    gr.Markdown("""
    ### π Models Used:
    - **FinBERT**: ProsusAI/finbert (financial sentiment analysis)
    - **Financial News**: mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis
    - **AI Analysis**: Real LLMs via Hugging Face Inference Providers
    ### β οΈ Note:
    AI Analysis uses actual language models for genuine AI-generated insights.
    If LLMs are unavailable, you can also integrate with OpenAI, Anthropic, or other providers.
    """)

if __name__ == "__main__":
    # share=True requests a public gradio.live tunnel (a no-op on Spaces,
    # which serve the app directly).
    demo.launch(share=True)