Spaces:
Runtime error
Runtime error
Initial upload: local files to Hugging Face repo
Browse files- .env +3 -0
- README.md +5 -5
- agents/alertcoordinator.py +22 -0
- agents/datacollector.py +11 -0
- agents/filterclassifier.py +15 -0
- agents/learningagent.py +8 -0
- agents/orchestrator.py +14 -0
- agents/sentimentanalyzer.py +17 -0
- app.py +54 -0
- config.py +17 -0
- dashboard.py +17 -0
- requirements.txt +14 -0
.env
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FINNHUB_API_KEY=  # SECURITY: this key was committed to a public repo — rotate it and supply it via HF Space secrets, never in .env
|
| 2 |
+
ALPHAVANTAGE_API_KEY=  # SECURITY: rotate this leaked key; set it as a Space secret
|
| 3 |
+
GNEWS_API_KEY=  # SECURITY: rotate this leaked key; set it as a Space secret
|
README.md
CHANGED
|
@@ -1,14 +1,14 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 5.49.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
-
short_description:
|
| 12 |
---
|
| 13 |
|
| 14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Asset News Notification System
|
| 3 |
+
emoji: 👀
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: indigo
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 5.49.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
+
short_description: Agentic AI system to notify a user of market events
|
| 12 |
---
|
| 13 |
|
| 14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
agents/alertcoordinator.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime


class AlertCoordinatorAgent:
    """Scores analyzed articles for urgency and persists the urgent ones as alerts."""

    # Minimum urgency score (scale 0..1) an article must exceed to trigger an alert.
    URGENCY_THRESHOLD = 0.5

    def __init__(self, config, database):
        self.config = config  # must expose IMPACT_KEYWORDS (lowercase phrases)
        self.db = database    # must expose save_alert(dict)

    def process_alerts(self, analyzed):
        """Score each analyzed article and save an alert for those above threshold.

        analyzed: iterable of article dicts; recognized keys are 'headline',
        'summary', 'symbol' and 'sentiment' (float, presumably in [-1, 1] —
        produced by SentimentAnalysisAgent).
        """
        for article in analyzed:
            headline = article.get('headline', '').lower()
            # Each matched impact keyword adds 0.2 urgency; strong sentiment
            # (either direction) adds up to 0.3.
            keywords = [k for k in self.config.IMPACT_KEYWORDS if k in headline]
            # Use .get like every other field access here — the original used
            # article['sentiment'] and would KeyError on an unscored article.
            sentiment = article.get('sentiment', 0)
            urgency = len(keywords) * 0.2 + abs(sentiment) * 0.3
            if urgency > self.URGENCY_THRESHOLD:
                alert = {
                    'timestamp': datetime.now().isoformat(),
                    'ticker': article.get('symbol', ''),
                    'headline': article.get('headline', ''),
                    'summary': article.get('summary', ''),
                    'sentiment': sentiment,
                    'impact_keywords': ','.join(keywords),
                    'urgency_score': round(min(urgency, 1.0), 2),
                }
                self.db.save_alert(alert)
|
agents/datacollector.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import date, timedelta

import requests


class DataCollectionAgent:
    """Fetches company news for a ticker from the Finnhub REST API."""
    # NOTE: the original file also had `from config.config import Config`, which
    # is both unused and broken (config.py sits at the repo root, there is no
    # config/ package) — removed so importing this module no longer crashes.

    def __init__(self, config):
        self.config = config  # must expose FINNHUB_API_KEY

    def collect_news(self, ticker, lookback_days=30):
        """Return a list of news dicts for *ticker*, or [] on any failure.

        lookback_days: how many days back from today to request. The original
        hard-coded a fixed 2023-01-01 .. 2025-12-31 window, which silently
        goes stale; a rolling window does not.
        """
        today = date.today()
        params = {
            'symbol': ticker,
            'from': (today - timedelta(days=lookback_days)).isoformat(),
            'to': today.isoformat(),
            'token': self.config.FINNHUB_API_KEY,
        }
        try:
            # Timeout so a hung API call cannot stall the scheduler thread.
            resp = requests.get("https://finnhub.io/api/v1/company-news",
                                params=params, timeout=10)
        except requests.RequestException:
            # Best-effort collection: network problems yield "no news", matching
            # the original's behavior for non-200 responses.
            return []
        return resp.json() if resp.status_code == 200 else []
|
agents/filterclassifier.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class FilterClassificationAgent:
    """Keeps only articles whose headline or summary mentions an impact keyword."""
    # NOTE: dropped the original `from config.config import Config` — it was
    # unused, and the path is broken (config.py is top-level, there is no
    # config/ package), so merely importing this module raised ImportError.

    def __init__(self, config):
        self.config = config  # must expose IMPACT_KEYWORDS (lowercase phrases)

    def filter_articles(self, articles):
        """Return the subset of *articles* mentioning any configured impact keyword.

        Headline and summary are lowercased before matching; keywords are
        assumed to already be lowercase (see Config.IMPACT_KEYWORDS).
        """
        impact_kw = self.config.IMPACT_KEYWORDS
        filtered = []
        for a in articles:
            head = a.get('headline', '').lower()
            summ = a.get('summary', '').lower()
            # Checked separately (not concatenated) so a multi-word keyword
            # can never falsely match across the headline/summary boundary.
            if any(k in head or k in summ for k in impact_kw):
                filtered.append(a)
        return filtered
|
agents/learningagent.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class LearningAgent:
    """Stub agent reserved for future self-optimization of the alerting rules."""

    def __init__(self, config, database):
        # Kept for the future implementation: thresholds live on config,
        # historical alerts live in the database.
        self.config = config
        self.db = database

    def learn_and_optimize(self):
        """Nightly hook (see app.py scheduler); currently a no-op placeholder."""
        # TODO: derive threshold/keyword adjustments from stored alert history.
        print("Learning/optimization step placeholder.")
|
agents/orchestrator.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class OrchestratorAgent:
    """Drives the full pipeline: collect -> filter -> score sentiment -> alert."""

    def __init__(self, datacol, filterer, sentiment, alert, learner):
        # Collaborators are injected so each stage can be swapped or mocked.
        self.datacollector = datacol
        self.filterclassifier = filterer
        self.sentimentanalyzer = sentiment
        self.alertcoordinator = alert
        self.learningagent = learner

    def process(self, ticker):
        """Run the pipeline for *ticker* and return the sentiment-scored articles."""
        raw_articles = self.datacollector.collect_news(ticker)
        relevant = self.filterclassifier.filter_articles(raw_articles)
        scored = self.sentimentanalyzer.batch_analyze(relevant)
        # Alerting is a side effect; the scored articles are still returned
        # so the UI can display them.
        self.alertcoordinator.process_alerts(scored)
        return scored
|
agents/sentimentanalyzer.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob


class SentimentAnalysisAgent:
    """Scores article sentiment by blending VADER and TextBlob polarity."""

    def __init__(self):
        # One reusable VADER analyzer; TextBlob is constructed per text below.
        self.analyzer = SentimentIntensityAnalyzer()

    def analyze(self, news_item):
        """Attach a blended 'sentiment' float to *news_item* and return it.

        The score is 70% VADER compound + 30% TextBlob polarity; both
        components are in [-1, 1], so the blend is too.
        """
        text = news_item.get("headline", "") + " " + news_item.get("summary", "")
        vader_score = self.analyzer.polarity_scores(text)['compound']
        blob_score = TextBlob(text).sentiment.polarity
        news_item['sentiment'] = 0.7 * vader_score + 0.3 * blob_score
        return news_item

    def batch_analyze(self, articles):
        """Score every article in place and return the list."""
        return [self.analyze(a) for a in articles]
|
app.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
# FIX: config.py lives at the repo root — `from config.config import Config`
# raised ModuleNotFoundError and is the likely cause of the Space's runtime error.
from config import Config
# FIX: NewsDatabase is defined in dashboard.py; there is no utils/ package in
# this upload, so `from utils.database import NewsDatabase` could not resolve.
from dashboard import NewsDatabase
from agents.datacollector import DataCollectionAgent
from agents.filterclassifier import FilterClassificationAgent
from agents.sentimentanalyzer import SentimentAnalysisAgent
from agents.alertcoordinator import AlertCoordinatorAgent
from agents.learningagent import LearningAgent
from agents.orchestrator import OrchestratorAgent
from apscheduler.schedulers.background import BackgroundScheduler

# Wire up the agent pipeline once at startup.
config = Config()
db = NewsDatabase(config.DATABASE_PATH)
datacol = DataCollectionAgent(config)
filterer = FilterClassificationAgent(config)
sentiment = SentimentAnalysisAgent()
alert = AlertCoordinatorAgent(config, db)
learner = LearningAgent(config, db)
orchestrator = OrchestratorAgent(datacol, filterer, sentiment, alert, learner)

# Background jobs: periodic news check plus a nightly learning pass.
scheduler = BackgroundScheduler()
scheduler.add_job(lambda: orchestrator.process("AAPL"), 'interval', minutes=config.CHECK_INTERVAL_MINUTES)
scheduler.add_job(learner.learn_and_optimize, 'cron', hour=0)
scheduler.start()


def _extract_ticker(user_input):
    """Best-effort ticker extraction from free text; returns None if nothing fits.

    FIX: the original lowercased the input and took the first alphabetic word of
    <= 5 letters, so "track AAPL" resolved to "TRACK". Prefer a word the user
    already wrote in ALL CAPS, then fall back to the original heuristic.
    """
    words = user_input.split()
    for word in words:
        if word.isalpha() and word.isupper() and len(word) <= 5:
            return word
    for word in words:
        if word.isalpha() and len(word) <= 5:
            return word.upper()
    return None


def user_request(user_input):
    """Gradio handler: resolve a ticker from free text and report scored news."""
    ticker = _extract_ticker(user_input)
    if not ticker:
        return "Please specify a stock ticker (e.g. AAPL)."
    # Run the full pipeline synchronously for this one request.
    results = orchestrator.process(ticker)
    if not results:
        return f"No recent news found for {ticker}."
    output = []
    for r in results:
        output.append(f"Headline: {r['headline']}\nSentiment: {r['sentiment']:.2f}\nSummary: {r['summary']}\n")
    return "\n".join(output)


iface = gr.Interface(fn=user_request,
                     inputs=gr.Textbox(label="What should I track?"),
                     outputs=gr.Textbox(label="Latest News/Sentiment"))
iface.launch()
|
| 54 |
+
|
config.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os

# FIX: the committed file contained unresolved git merge conflict markers
# (`<<<<<<< HEAD` ... `>>>>>>>`), which are a SyntaxError — this alone makes the
# whole app fail to start. The two conflicting halves defined the same
# IMPACT_KEYWORDS / DATABASE_PATH values, so they are merged into one copy.
try:
    from dotenv import load_dotenv
    load_dotenv()  # local dev: read keys from .env
except ImportError:
    # Hosted deployments (e.g. HF Space secrets) inject env vars directly,
    # so python-dotenv is genuinely optional there.
    pass


class Config:
    """Central configuration: API keys from the environment, tunables inline."""

    FINNHUB_API_KEY = os.getenv("FINNHUB_API_KEY")
    ALPHAVANTAGE_API_KEY = os.getenv("ALPHAVANTAGE_API_KEY")
    GNEWS_API_KEY = os.getenv("GNEWS_API_KEY")

    # Polling period for the background news-check job (see app.py scheduler).
    CHECK_INTERVAL_MINUTES = 15
    # Lowercase phrases marking an article as market-moving; matched by
    # FilterClassificationAgent and AlertCoordinatorAgent.
    IMPACT_KEYWORDS = ["pandemic", "war", "conflict", "earnings", "bankruptcy",
                       "inflation", "recession", "federal reserve", "interest rate"]
    # SQLite file used by NewsDatabase.
    DATABASE_PATH = "data/newsalerts.db"
|
| 17 |
+
|
dashboard.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sqlite3


class NewsDatabase:
    """Thin SQLite wrapper that stores triggered alerts.

    NOTE(review): this class lives in dashboard.py, but app.py imports it from
    utils.database — one of the two paths is wrong; confirm the repo layout.
    """

    def __init__(self, db_path):
        # FIX: check_same_thread=False — save_alert is called from the
        # APScheduler background thread while the connection is created on the
        # main thread; the default would raise sqlite3.ProgrammingError there.
        self.conn = sqlite3.connect(db_path, check_same_thread=False)
        self.init_db()

    def init_db(self):
        """Create the alerts table on first use (idempotent)."""
        c = self.conn.cursor()
        # Column is named 'urgency' although the alert dict key is
        # 'urgency_score' — kept as-is so existing DB files stay readable.
        c.execute('''CREATE TABLE IF NOT EXISTS alerts (timestamp TEXT, ticker TEXT, headline TEXT, summary TEXT, sentiment REAL, impact_keywords TEXT, urgency REAL)''')
        self.conn.commit()

    def save_alert(self, alert):
        """Insert one alert dict (shape produced by AlertCoordinatorAgent)."""
        c = self.conn.cursor()
        c.execute('''INSERT INTO alerts VALUES (?,?,?,?,?,?,?)''', (
            alert['timestamp'], alert['ticker'], alert['headline'], alert['summary'],
            alert['sentiment'], alert['impact_keywords'], alert['urgency_score']
        ))
        self.conn.commit()
|
requirements.txt
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
requests
|
| 2 |
+
aiohttp
|
| 3 |
+
pandas
|
| 4 |
+
nltk
|
| 5 |
+
vaderSentiment
|
| 6 |
+
textblob
|
| 7 |
+
apscheduler
|
| 8 |
+
flask
|
| 9 |
+
python-dotenv
|
| 10 |
+
gradio
|
| 11 |
+
transformers # Optional, for advanced NLP
|
| 12 |
+
torch # Optional, if using transformers
|
| 13 |
+
finnhub-python
|
| 14 |
+
alpha-vantage
|