| |
|
| | import os |
| | from dotenv import load_dotenv |
| | from transformers import pipeline |
| | import os |
| | import pandas as pd |
| | from GoogleNews import GoogleNews |
| | from langchain_openai import ChatOpenAI |
| | import pandas as pd |
| | import praw |
| | from datetime import datetime |
| |
|
| | load_dotenv() |
| |
|
def fetch_news(stockticker):
    """Fetch recent Google News headlines for a stock ticker.

    Args:
        stockticker (str): Ticker symbol to search for (e.g. "AAPL").

    Returns:
        list: Dicts with 'News_Article' (headline text) and 'URL' keys,
            capped at NO_OF_NEWS_ARTICLES_TO_FETCH entries.

    Raises:
        KeyError: If DAYS_TO_FETCH_NEWS or NO_OF_NEWS_ARTICLES_TO_FETCH
            is not set in the environment.
    """
    load_dotenv()  # idempotent; ensures env vars exist when called directly
    period = os.environ["DAYS_TO_FETCH_NEWS"]
    # Convert once up front instead of on every loop iteration.
    max_articles = int(os.environ["NO_OF_NEWS_ARTICLES_TO_FETCH"])

    googlenews = GoogleNews()
    googlenews.set_period(period)
    googlenews.get_news(stockticker)
    headlines = googlenews.get_texts()
    urls = googlenews.get_links()

    # zip() pairs each headline with its link and stops at the shorter list,
    # guarding against the IndexError the old urls[counter] lookup could raise
    # when GoogleNews returns fewer links than headlines.
    return [
        {'News_Article': headline, 'URL': url}
        for headline, url in list(zip(headlines, urls))[:max_articles]
    ]
| |
|
def _search_subreddit(reddit, subreddit_name, query, headlines, min_results=10):
    """Search one subreddit for `query`, widening the time window until
    `headlines` holds at least `min_results` entries (week -> year -> all time).

    Adds formatted "title, Date: ..., URL: ..." strings to `headlines` in place.
    `headlines` is shared across subreddits, so the threshold check reflects the
    total collected so far — same behavior as the original copy-pasted blocks.
    """
    for time_filter in ('week', 'year', 'all'):
        # The week search always runs; wider windows only run while we are
        # still short of min_results (the set only ever grows, so a single
        # break is equivalent to the original's repeated len() checks).
        if time_filter != 'week' and len(headlines) >= min_results:
            break
        for submission in reddit.subreddit(subreddit_name).search(
                query, time_filter=time_filter):
            # NOTE: datetime.utcfromtimestamp is deprecated since Python 3.12;
            # kept here to preserve the exact output format.
            created = datetime.utcfromtimestamp(
                int(submission.created_utc)).strftime('%Y-%m-%d %H:%M:%S')
            headlines.add(
                submission.title + ', Date: ' + created + ', URL:' + submission.url)


def fetch_reddit_news(cryptocurrencyticker):
    """Collect Reddit post headlines mentioning a cryptocurrency ticker.

    Searches a fixed list of crypto-related subreddits, widening each search's
    time window when fewer than 10 headlines have been gathered overall.

    Args:
        cryptocurrencyticker (str): Ticker/keyword to search for.

    Returns:
        set: Formatted "title, Date: ..., URL: ..." strings.

    Raises:
        KeyError: If the REDDIT_* credentials are missing from the environment.
    """
    load_dotenv()
    reddit = praw.Reddit(
        client_id=os.environ["REDDIT_CLIENT_ID"],
        client_secret=os.environ["REDDIT_CLIENT_SECRET"],
        user_agent=os.environ["REDDIT_USER_AGENT"],
    )

    headlines = set()
    for subreddit_name in (
        'CryptoCurrencyTrading',
        'CoinBase',
        'coingecko',
        'CryptoCurrency',
        'ShitcoinCentral',
        'shitcoinmoonshots',
        'solana',
    ):
        _search_subreddit(reddit, subreddit_name, cryptocurrencyticker, headlines)
    return headlines
| |
|
def _get_sentiment_classifier():
    """Lazily create and cache the financial-news sentiment pipeline.

    Loading the Hugging Face model is expensive; the original code rebuilt the
    pipeline on every call even though __main__ analyzes one article per call.
    The cached instance is stored as a function attribute so no new imports
    are needed.
    """
    if not hasattr(_get_sentiment_classifier, "_classifier"):
        _get_sentiment_classifier._classifier = pipeline(
            model='mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis')
    return _get_sentiment_classifier._classifier


def analyze_sentiment(article):
    """Analyze the sentiment of a single news article.

    Args:
        article: Article text (coerced to str before classification).

    Returns:
        dict: {'News_Article': <original article>,
               'Sentiment': <pipeline output, a list of label/score dicts>}.
    """
    classifier = _get_sentiment_classifier()
    sentiment_result = classifier(str(article))

    return {
        'News_Article': article,
        'Sentiment': sentiment_result,
    }
| |
|
| |
|
def generate_summary_of_sentiment(sentiment_analysis_results):
    """Generate an LLM summary rationalizing the dominant sentiment.

    Sends the stringified sentiment results to GPT-4o with instructions to
    produce a date-ordered summary with markdown article links.

    Args:
        sentiment_analysis_results: Collection of per-article sentiment
            results (any object whose str() is meaningful to the prompt).

    Returns:
        str: The model's summary text.

    Raises:
        KeyError: If OPENAI_API_KEY is not set in the environment.
    """
    news_article_sentiment = str(sentiment_analysis_results)
    print("News article sentiment : " + news_article_sentiment)

    # Fail fast with the same KeyError the old self-assignment produced;
    # ChatOpenAI reads the key from the environment on its own.
    os.environ["OPENAI_API_KEY"]
    model = ChatOpenAI(
        model="gpt-4o",
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )

    messages = [
        {"role": "system", "content": "You are a helpful assistant that looks at all news articles with their sentiment, hyperlink and date in front of the article text, the articles MUST be ordered by date!, and generate a summary rationalizing dominant sentiment. At the end of the summary, add URL links with dates for all the articles in the markdown format for streamlit. Make sure the articles as well as the links are ordered descending by Date!!!!!!! Example of adding the URLs: The Check out the links: [link](%s) % url, 2024-03-01. "},
        {"role": "user", "content": f"News articles and their sentiments: {news_article_sentiment}"}
    ]
    response = model.invoke(messages)

    summary = response.content
    print ("+++++++++++++++++++++++++++++++++++++++++++++++")
    print(summary)
    print ("+++++++++++++++++++++++++++++++++++++++++++++++")
    return summary
| |
|
| |
|
def plot_sentiment_graph(sentiment_analysis_results):
    """Compute per-label sentiment counts for plotting.

    Despite the name, this function does not render a chart itself — it prints
    the input frame and returns the counts for the caller (e.g. Streamlit) to
    plot. The old duplicate `grouped = df['Sentiment'].value_counts()` line,
    whose result was never used, has been removed.

    Args:
        sentiment_analysis_results: Iterable of dicts, each containing at
            least a 'Sentiment' key (e.g. 'positive' / 'negative' / 'neutral').

    Returns:
        pandas.Series: Count of articles per sentiment label, indexed by label
        and sorted by count descending (pandas value_counts default).
    """
    df = pd.DataFrame(sentiment_analysis_results)
    print(df)

    sentiment_counts = df['Sentiment'].value_counts()

    return sentiment_counts
| |
|
| |
|
def get_dominant_sentiment(sentiment_analysis_results):
    """Return the most frequent sentiment label across all analyzed articles.

    Args:
        sentiment_analysis_results: Iterable of dicts, each carrying a
            'Sentiment' key (e.g. 'positive' / 'negative' / 'neutral').

    Returns:
        The sentiment label with the highest article count.
    """
    frame = pd.DataFrame(sentiment_analysis_results)

    # Tally articles per sentiment label and print the table for debugging.
    tally = frame['Sentiment'].value_counts().reset_index()
    tally.columns = ['sentiment', 'count']
    print(tally)

    # idxmax picks the first row holding the maximum count, so ties keep
    # value_counts ordering — identical to the original behavior.
    top_row = tally.loc[tally['count'].idxmax()]
    return top_row['sentiment']
| |
|
| | |
if __name__ == '__main__':
    # Smoke-test the pipeline end to end with a sample ticker.
    news_articles = fetch_news('AAPL')

    analysis_results = []
    for news_item in news_articles:
        sentiment_analysis_result = analyze_sentiment(news_item['News_Article'])

        print(f'News Article: {sentiment_analysis_result["News_Article"]} : Sentiment: {sentiment_analysis_result["Sentiment"]}', '\n')

        # Keep only the top label string for the downstream aggregations.
        analysis_results.append({
            'News_Article': sentiment_analysis_result["News_Article"],
            'Sentiment': sentiment_analysis_result["Sentiment"][0]['label'],
        })

    # Report the overall sentiment, then the per-label counts.
    dominant_sentiment = get_dominant_sentiment(analysis_results)
    print(dominant_sentiment)

    plot_sentiment_graph(analysis_results)
| |
|
| |
|