AA-6055 committed
Commit 443a737 · verified · 1 Parent(s): 5157ac7

Upload 4 files

Files changed (4)
  1. api.py +206 -0
  2. app.py +45 -0
  3. requirements.txt +12 -0
  4. utils.py +43 -0
api.py ADDED
@@ -0,0 +1,206 @@
+ from flask import Flask, request, jsonify, send_file
+ from bs4 import BeautifulSoup
+ from newspaper import Article
+ from textblob import TextBlob
+ # from newsapi import NewsApiClient
+ from transformers import pipeline
+ import requests
+ from utils import *
+ import pandas as pd
+ import base64
+ import json
+ # import nest_asyncio
+
+ app = Flask(__name__)
+
+ # newsapi = NewsApiClient(api_key='YOUR_NEWS_API_KEY')  # Replace with your API key
+
+ # @app.route('/analyze_news', methods=['GET'])
+ # def analyze_news():
+ def analyze_news(company, source):
+     # company = request.args.get('company')
+     # source = request.args.get('source')
+     if not company or not source:
+         return {"error": "Please provide a company name and a news source"}
+
+     all_articles = []
+     output = {"Company": f"{company}", "Articles": all_articles}
+
+     overall_sentiment_count = 0
+     sentiment_count = {"POSITIVE": 0, "NEGATIVE": 0, "NEUTRAL": 0}
+
+     if source == "NewsOrg":
+         # Fetch articles from the News API
+         # response = newsapi.get_everything(q=company, page_size=5, sort_by='publishedAt', language='en')
+         params = {"q": company, "apiKey": "7396bdb0bc0a42c5b5b0c9c5945d32fa", "pageSize": 10, "sortBy": "publishedAt", "language": "en"}
+         response = requests.get(url="https://newsapi.org/v2/everything", params=params)
+         articles = json.loads(response.text)
+
+         for idx, article in enumerate(articles.get("articles", [])):
+             url = article.get("url")
+             news_article = Article(url)
+             try:
+                 news_article.download()
+                 news_article.parse()
+             except Exception:
+                 continue
+
+             # Score the full article text with TextBlob polarity
+             blob = TextBlob(news_article.text)
+             polarity = blob.sentiment.polarity
+
+             if polarity > 0.3:
+                 sentiment = "POSITIVE"
+                 overall_sentiment_count += 1
+             elif polarity < -0.3:
+                 sentiment = "NEGATIVE"
+                 overall_sentiment_count -= 1
+             else:
+                 sentiment = "NEUTRAL"
+
+             sentiment_count[sentiment] += 1
+
+             all_articles.append({
+                 "Title": article.get("title"),
+                 "Summary": article.get("description"),
+                 "Sentiment": sentiment
+             })
+
+         output["Comparative Sentiment Score"] = {
+             "Sentiment Distribution": sentiment_count
+         }
+
+         if overall_sentiment_count > 0:
+             output["Final Sentiment Analysis"] = f"{company.capitalize()}'s latest news is mostly positive. Potential stock growth expected."
+         elif overall_sentiment_count < 0:
+             output["Final Sentiment Analysis"] = f"{company.capitalize()}'s latest news is mostly negative. Potential stock decline expected."
+         else:
+             output["Final Sentiment Analysis"] = f"{company.capitalize()}'s latest news is mostly neutral. Stocks are likely to stay stagnant for some time."
+
+         print(output)
+         print(f"{'>'*5} Starting text summarization.")
+
+         # Summarize the first few articles and convert the summary to Hindi speech
+         text_to_summarize = " ".join([d['Title'] + " " + d['Summary'] for d in all_articles[:5]])
+         summary_final = summarize_text(text_to_summarize)
+
+         audio_path = generate_hindi_tts(summary_final)
+         if audio_path:  # and os.path.exists(audio_path)
+             # Encode the audio file as base64 so it can travel inside the JSON payload
+             with open(audio_path, "rb") as f:
+                 audio_base64 = base64.b64encode(f.read()).decode('utf-8')
+
+             output["Audio"] = audio_base64
+             return output
+         else:
+             return {"error": "Failed to generate audio"}
+
+     elif source == "Yahoo News":
+         url = f"https://finance.yahoo.com/quote/{company}/news/"
+         headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'}
+         response = requests.get(url, headers=headers)
+
+         if response.status_code != 200:
+             print("Failed to fetch news articles")
+             return {}
+
+         paragraphs = []
+         titles = []
+         summaries = []
+         soup = BeautifulSoup(response.content, 'html.parser')
+
+         # Scrape title/summary pairs from the Yahoo Finance news page
+         for news in soup.find_all("div", class_="holder yf-1napat3"):
+             title_all = news.find_all('h3', class_="clamp yf-82qtw3")
+             summary_all = news.find_all('p', class_="clamp yf-82qtw3")
+             for title, summary in zip(title_all, summary_all):
+                 title_text = title.get_text()
+                 summary_text = summary.get_text()
+                 paragraph = title_text + ' ' + summary_text
+                 titles.append(title_text)
+                 summaries.append(summary_text)
+                 paragraphs.append(paragraph)
+
+         # Analyze sentiment and prepare the output
+         for i, paragraph in enumerate(paragraphs):
+             sentiment = analyze_sentiment(paragraph)
+             if sentiment == "POSITIVE":
+                 overall_sentiment_count += 1
+             elif sentiment == "NEGATIVE":
+                 overall_sentiment_count -= 1
+
+             sentiment_count[sentiment] += 1
+
+             all_articles.append({
+                 "Title": titles[i],
+                 "Summary": summaries[i],
+                 "Sentiment": sentiment
+             })
+
+         output["Comparative Sentiment Score"] = {
+             "Sentiment Distribution": sentiment_count
+         }
+
+         if overall_sentiment_count > 0:
+             output["Final Sentiment Analysis"] = f"{company.capitalize()}'s latest news is mostly positive. Potential stock growth expected."
+         elif overall_sentiment_count < 0:
+             output["Final Sentiment Analysis"] = f"{company.capitalize()}'s latest news is mostly negative. Potential stock decline expected."
+         else:
+             output["Final Sentiment Analysis"] = f"{company.capitalize()}'s latest news is mostly neutral. Stocks are likely to stay stagnant for some time."
+
+         # Summarize the first few articles and convert the summary to Hindi speech
+         text_to_summarize = " ".join([d['Title'] + " " + d['Summary'] for d in all_articles[:5]])
+         summary_final = summarize_text(text_to_summarize)
+
+         audio_path = generate_hindi_tts(summary_final)
+         if audio_path:  # and os.path.exists(audio_path)
+             # Encode the audio file as base64 so it can travel inside the JSON payload
+             with open(audio_path, "rb") as f:
+                 audio_base64 = base64.b64encode(f.read()).decode('utf-8')
+
+             output["Audio"] = audio_base64
+             return output
+         else:
+             return {"error": "Failed to generate audio"}
+
+     else:
+         return {"error": "Invalid source provided"}
+
+
+ if __name__ == '__main__':
+     app.run(debug=True, port=8000)
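
A minimal sketch of calling analyze_news directly, the same way app.py does, assuming the NewsAPI key in api.py is still valid and the models loaded in utils.py have finished downloading; the output file name summary_hi.mp3 is purely illustrative:

    import base64
    from api import analyze_news

    result = analyze_news(company="Tesla", source="NewsOrg")
    if result and "error" not in result:
        print(result["Final Sentiment Analysis"])
        print(result["Comparative Sentiment Score"]["Sentiment Distribution"])
        # write the base64-encoded Hindi audio back out as an mp3 file
        with open("summary_hi.mp3", "wb") as f:
            f.write(base64.b64decode(result["Audio"]))
    else:
        print("Analysis failed:", result)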
app.py ADDED
@@ -0,0 +1,45 @@
+ import streamlit as st
+ from api import analyze_news
+ import base64
+ import nest_asyncio
+
+ nest_asyncio.apply()
+
+ st.title("Company News Sentiment Analyzer")
+
+ company = st.text_input("Enter Company Name", placeholder="Example: Tesla")
+ source = st.selectbox("Select the source you want news from:", ("NewsOrg", "Yahoo News"))
+
+ if st.button("Fetch News & Analyze"):
+     if not company or not source:
+         st.error("Please enter a company name and select the source")
+     else:
+         with st.spinner("Fetching from API..."):
+             data = analyze_news(company=company, source=source)
+             if data and "error" not in data:
+                 st.subheader(f"Sentiment Analysis for {data['Company']}")
+
+                 # Comparative Sentiment Score
+                 st.write("### Comparative Sentiment Score")
+                 sentiment_dist = data.get("Comparative Sentiment Score", {})
+                 st.json(sentiment_dist)
+
+                 # Final Sentiment Analysis
+                 st.write("### Final Sentiment Analysis")
+                 st.success(data.get("Final Sentiment Analysis", "No analysis available"))
+
+                 # Audio Player for Hindi TTS
+                 if "Audio" in data:
+                     st.write("### Listen to Hindi Summary")
+                     audio_bytes = base64.b64decode(data["Audio"])
+                     st.audio(audio_bytes, format="audio/mp3")
+
+                 # Articles Display
+                 st.write("### Articles:")
+                 for idx, article in enumerate(data["Articles"]):
+                     st.write(f"**{idx+1}. Title:** {article['Title']}")
+                     st.write(f"**Sentiment:** {article['Sentiment']}")
+                     st.write(f"**Summary:** {article['Summary']}")
+                     st.markdown("---")
+             else:
+                 st.error("Failed to fetch data from API.")
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ flask
+ streamlit
+ beautifulsoup4
+ torch
+ transformers
+ gtts
+ newspaper3k
+ pandas
+ textblob
+ requests
+ lxml_html_clean
+ googletrans==4.0.0-rc1
utils.py ADDED
@@ -0,0 +1,43 @@
+ from transformers import pipeline
+ from gtts import gTTS
+ from googletrans import Translator
+
+ # Loading models
+ summarizer = pipeline("summarization", model="facebook/bart-large-cnn")  # Load summarizer
+ sentiment_analyzer = pipeline("sentiment-analysis")  # Load sentiment analyzer
+ # classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")  # Load classifier
+
+ def analyze_sentiment(text):
+     result = sentiment_analyzer(text[:500])[0]
+     return result['label']
+
+ def summarize_text(text):
+     cleaned_text = text.strip().replace("\n", " ")
+     cleaned_text = cleaned_text[:3000]  # Limit to avoid token overflow
+
+     result = summarizer(
+         cleaned_text,
+         max_length=130,
+         min_length=30,
+         do_sample=False
+     )
+
+     summary_text = result[0]['summary_text']
+     return summary_text
+
+
+ def translate_to_hindi(text):
+     translator = Translator()
+     result = translator.translate(text, dest='hi')
+     return result.text
+
+ def generate_hindi_tts(text, filename="output.mp3"):
+     try:
+         hindi_text = translate_to_hindi(text)
+         tts = gTTS(text=hindi_text, lang='hi')
+         tts.save(filename)
+         print(f"Hindi audio saved to {filename}")
+         return filename
+     except Exception as e:
+         print(f"Error in generating the TTS: {e}")
+         return None
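
A short usage sketch of the helpers above, assuming the transformers and gTTS model/network calls succeed; sample_text is made up for illustration and the printed sentiment label is only an example:

    from utils import analyze_sentiment, summarize_text, generate_hindi_tts

    sample_text = (
        "Tesla shares rose after the company reported record quarterly deliveries. "
        "Analysts raised their price targets following the announcement."
    )

    print(analyze_sentiment(sample_text))     # e.g. "POSITIVE"
    summary = summarize_text(sample_text)     # English summary via bart-large-cnn
    audio_file = generate_hindi_tts(summary)  # translate to Hindi, then synthesize speech
    print(audio_file)                         # "output.mp3" on success, None on failure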