import json
import os
import re
from datetime import datetime

import pandas as pd
import praw
from dotenv import load_dotenv
from GoogleNews import GoogleNews
from langchain_openai import ChatOpenAI
from tavily import TavilyClient
from transformers import pipeline

load_dotenv()

TAVILY_API_KEY = os.environ["TAVILY_API_KEY"]


def fetch_news(topic):
    """Fetch recent Google News articles about a topic.

    The look-back period and article cap come from the DAYS_TO_FETCH_NEWS and
    NO_OF_NEWS_ARTICLES_TO_FETCH environment variables.

    Args:
        topic (str): Topic of interest.

    Returns:
        list: Dictionaries with 'News_Article' and 'URL' keys.
    """
    load_dotenv()
    days_to_fetch_news = os.environ["DAYS_TO_FETCH_NEWS"]

    googlenews = GoogleNews()
    googlenews.set_period(days_to_fetch_news)
    googlenews.get_news(topic)
    news_json = googlenews.get_texts()
    urls = googlenews.get_links()

    no_of_news_articles_to_fetch = int(os.environ["NO_OF_NEWS_ARTICLES_TO_FETCH"])

    news_article_list = []
    for counter, article in enumerate(news_json):
        if counter >= no_of_news_articles_to_fetch:
            break
        news_article_list.append({
            'News_Article': article,
            'URL': urls[counter],
        })

    return news_article_list
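
# A minimal usage sketch (assumes DAYS_TO_FETCH_NEWS, e.g. "7d", and
# NO_OF_NEWS_ARTICLES_TO_FETCH, e.g. "5", are set in the environment;
# the topic value is illustrative):
#
#     articles = fetch_news('AAPL')
#     # [{'News_Article': '...', 'URL': 'https://news.google.com/...'}, ...]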


def fetch_tavily_news(answer):
    """Extract article URLs from a Tavily search response string.

    Args:
        answer (str): Raw Tavily response text, which may be JSON or free-form.

    Returns:
        list: De-duplicated URLs found in the response (empty on failure).
    """
    try:
        urls = []

        # First pass: split on the literal token "url" and pull out the
        # quoted value that follows each occurrence.
        try:
            parts = answer.split("url")
            for part in parts[1:]:
                try:
                    if '\\\\' in part:
                        # Handle doubly-escaped JSON fragments.
                        url = part.split('\\\\')[2].split('"')[1]
                    elif '"' in part:
                        url = part.split('"')[1]
                    else:
                        continue
                    if url.startswith('http'):
                        urls.append(url)
                except (IndexError, AttributeError):
                    continue
        except Exception as e:
            print(f"Error extracting URLs: {e}")

        # Second pass: if the answer is valid JSON, read 'url' fields directly.
        if not urls:
            try:
                data = json.loads(answer)
                if isinstance(data, list):
                    for item in data:
                        if isinstance(item, dict) and 'url' in item:
                            urls.append(item['url'])
            except json.JSONDecodeError:
                pass

        # Last resort: a plain regex scan for anything that looks like a URL.
        if not urls:
            url_pattern = r'https?://[^\s<>"]+|www\.[^\s<>"]+'
            urls = re.findall(url_pattern, answer)

        # De-duplicate while preserving order.
        urls = list(dict.fromkeys(urls))

        return urls

    except Exception as e:
        print(f"Error in fetch_tavily_news: {e}")
        return []
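
# A minimal sketch of how the `answer` argument might be produced with the
# TavilyClient imported above; the query and max_results values are
# illustrative assumptions, not part of this module:
#
#     client = TavilyClient(api_key=TAVILY_API_KEY)
#     response = client.search("AAPL news", max_results=5)
#     urls = fetch_tavily_news(str(response))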


def fetch_reddit_news(topic):
    """Fetch recent Reddit headlines about a topic from a few DC-area subreddits.

    Args:
        topic (str): Topic of interest.

    Returns:
        set: Headline strings, each carrying the post title, UTC date, and URL.
    """
    load_dotenv()
    REDDIT_USER_AGENT = os.environ["REDDIT_USER_AGENT"]
    REDDIT_CLIENT_ID = os.environ["REDDIT_CLIENT_ID"]
    REDDIT_CLIENT_SECRET = os.environ["REDDIT_CLIENT_SECRET"]

    reddit = praw.Reddit(
        client_id=REDDIT_CLIENT_ID,
        client_secret=REDDIT_CLIENT_SECRET,
        user_agent=REDDIT_USER_AGENT,
    )

    def format_headline(submission):
        created = datetime.utcfromtimestamp(int(submission.created_utc))
        return (submission.title
                + ', Date: ' + created.strftime('%Y-%m-%d %H:%M:%S')
                + ', URL:' + submission.url)

    headlines = set()
    for subreddit in ('nova', 'fednews', 'washingtondc'):
        for submission in reddit.subreddit(subreddit).search(topic, time_filter='day'):
            headlines.add(format_headline(submission))

    # Widen the search window if the last day alone yields too few results.
    if len(headlines) < 10:
        for submission in reddit.subreddit('washingtondc').search(topic, time_filter='year'):
            headlines.add(format_headline(submission))
    if len(headlines) < 10:
        for submission in reddit.subreddit('washingtondc').search(topic):
            headlines.add(format_headline(submission))

    return headlines
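
# Example (assumes valid Reddit API credentials in the environment; the topic
# is illustrative):
#
#     for headline in fetch_reddit_news('metro'):
#         print(headline)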


def analyze_sentiment(article):
    """Analyze the sentiment of a given news article.

    Args:
        article (str): The news article text.

    Returns:
        dict: A dictionary with 'News_Article' and 'Sentiment' keys, where
        'Sentiment' is the raw classifier output.
    """
    # Note: building the pipeline on every call is slow; hoist it to module
    # level if this function is called in a loop.
    classifier = pipeline(model='tabularisai/robust-sentiment-analysis')
    sentiment_result = classifier(str(article))

    analysis_result = {
        'News_Article': article,
        'Sentiment': sentiment_result,
    }

    return analysis_result
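
# The classifier returns a list with one dict per input; the label and score
# below are illustrative (the exact label set is defined by the
# tabularisai/robust-sentiment-analysis model card):
#
#     analyze_sentiment("Apple beats earnings expectations")
#     # {'News_Article': 'Apple beats earnings expectations',
#     #  'Sentiment': [{'label': 'Positive', 'score': 0.98}]}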


def generate_summary_of_sentiment(sentiment_analysis_results):
    """Use an LLM to summarize the articles and rationalize the dominant sentiment.

    Args:
        sentiment_analysis_results (list): Per-article sentiment dictionaries.

    Returns:
        str: A markdown summary with article links, ordered by date descending.
    """
    news_article_sentiment = str(sentiment_analysis_results)
    print("News article sentiment: " + news_article_sentiment)

    OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
    model = ChatOpenAI(
        model="gpt-4o",
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        api_key=OPENAI_API_KEY,
    )

    messages = [
        {
            "role": "system",
            "content": (
                "You are a helpful assistant that looks at news articles, each "
                "prefixed with its sentiment, hyperlink, and date, and generates "
                "a summary rationalizing the dominant sentiment. At the end of "
                "the summary, add URL links for all the articles in markdown "
                "format for Streamlit. Order the articles, and their links, by "
                "date descending. Example of adding the URLs: Check out the "
                "links: [link](%s) % url."
            ),
        },
        {"role": "user", "content": f"News articles and their sentiments: {news_article_sentiment}"},
    ]
    response = model.invoke(messages)

    summary = response.content
    print("+++++++++++++++++++++++++++++++++++++++++++++++")
    print(summary)
    print("+++++++++++++++++++++++++++++++++++++++++++++++")
    return summary
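
# Example input shape (matches what the __main__ block below builds; the
# article text and sentiment label are illustrative; assumes OPENAI_API_KEY
# is set):
#
#     generate_summary_of_sentiment([
#         {'News_Article': 'Apple beats earnings expectations',
#          'Sentiment': 'Positive'},
#     ])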


def plot_sentiment_graph(sentiment_analysis_results):
    """Compute per-label sentiment counts for plotting.

    Args:
        sentiment_analysis_results (list): Dictionaries with 'News_Article'
        and 'Sentiment' keys.

    Returns:
        pandas.Series: Count of articles per sentiment label, suitable for
        a bar chart rendered by the caller.
    """
    df = pd.DataFrame(sentiment_analysis_results)

    sentiment_counts = df['Sentiment'].value_counts()

    return sentiment_counts
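
# A minimal sketch of rendering the counts; Streamlit (st.bar_chart) is an
# assumption based on the Streamlit-markdown instruction in
# generate_summary_of_sentiment, not something this module imports:
#
#     import streamlit as st
#     st.bar_chart(plot_sentiment_graph(analysis_results))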


def get_dominant_sentiment(sentiment_analysis_results):
    """Return the overall sentiment label with the highest article count.

    Args:
        sentiment_analysis_results (list): Dictionaries with 'News_Article'
        and 'Sentiment' keys.

    Returns:
        str: The dominant sentiment label, e.g. negative, positive, or neutral.
    """
    df = pd.DataFrame(sentiment_analysis_results)

    # Count articles per sentiment label.
    sentiment_counts = df['Sentiment'].value_counts().reset_index()
    sentiment_counts.columns = ['sentiment', 'count']
    print(sentiment_counts)

    # Pick the label with the largest count.
    dominant_sentiment = sentiment_counts.loc[sentiment_counts['count'].idxmax()]

    return dominant_sentiment['sentiment']


if __name__ == '__main__':
    news_articles = fetch_news('AAPL')

    analysis_results = []
    for article in news_articles:
        sentiment_analysis_result = analyze_sentiment(article['News_Article'])
        print(f'News Article: {sentiment_analysis_result["News_Article"]} : '
              f'Sentiment: {sentiment_analysis_result["Sentiment"]}', '\n')
        analysis_results.append({
            'News_Article': sentiment_analysis_result["News_Article"],
            'Sentiment': sentiment_analysis_result["Sentiment"][0]['label'],
        })

    dominant_sentiment = get_dominant_sentiment(analysis_results)
    print(dominant_sentiment)

    plot_sentiment_graph(analysis_results)