Sasmita Harini commited on
Commit
37b27d1
Β·
1 Parent(s): 8279aeb

Updated app.py with new title and utils.py with latest fetch logic

Browse files
README.md CHANGED
@@ -1,34 +1,13 @@
1
- ---
2
- title: News Summarization and Text-to-Speech
3
- emoji: πŸ“°
4
- colorFrom: blue
5
- colorTo: green
6
- sdk: streamlit
7
- sdk_version: "1.36.0"
8
- app_file: app.py
9
- pinned: false
10
- ---
11
 
12
- # News Summarization and Text-to-Speech App
13
 
14
- This Space fetches news articles about a specified company, summarizes them, performs sentiment analysis, and generates a Hindi audio summary of the final sentiment.
 
 
15
 
16
- ## How to Use
17
- 1. Enter a company name (e.g., "Tesla") in the text box.
18
- 2. Click "Fetch News".
19
- 3. View the summarized news, download the text file, and listen to or download the Hindi audio of the sentiment analysis.
20
-
21
- ## Features
22
- - Fetches news from multiple RSS feeds.
23
- - Summarizes articles using T5 model.
24
- - Performs sentiment analysis and topic extraction.
25
- - Translates sentiment to Hindi and generates audio.
26
-
27
- ## Dependencies
28
- See `requirements.txt` for the full list of Python packages.
29
-
30
- ## Notes
31
- - Requires a Groq API key (set as a secret in Space settings).
32
- - Limited to 10 articles per request to manage resources.
33
-
34
- Check out the configuration reference at [https://huggingface.co/docs/hub/spaces-config-reference](https://huggingface.co/docs/hub/spaces-config-reference).
 
1
+ # News Summarization and Text-to-Speech Application
 
 
 
 
 
 
 
 
 
2
 
3
+ This application fetches news articles about a specified company, summarizes them, performs sentiment analysis, and generates a Hindi audio summary of the final sentiment.
4
 
5
+ ## Prerequisites
6
+ - Python 3.10+
7
+ - A Groq API key (set as an environment variable: `GROQ_API_KEY`)
8
 
9
+ ## Setup Instructions
10
+ 1. Clone the repository:
11
+ ```bash
12
+ git clone <repository-url>
13
+ cd <repository-directory>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
__pycache__/api.cpython-39.pyc ADDED
Binary file (3.08 kB). View file
 
__pycache__/app.cpython-39.pyc ADDED
Binary file (4.42 kB). View file
 
__pycache__/backend.cpython-39.pyc ADDED
Binary file (5.79 kB). View file
 
__pycache__/utils.cpython-39.pyc CHANGED
Binary files a/__pycache__/utils.cpython-39.pyc and b/__pycache__/utils.cpython-39.pyc differ
 
api.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ import utils
4
+ from deep_translator import GoogleTranslator
5
+ from gtts import gTTS
6
+ import base64
7
+ import io
8
+ import json
9
+ import uvicorn
10
+ import logging
11
+
12
+ logging.basicConfig(level=logging.INFO)
13
+ logger = logging.getLogger(__name__)
14
+
15
+ app = FastAPI(title="News Analysis API")
16
+ translator = GoogleTranslator(source='en', target='hi')
17
+
18
+ class CompanyRequest(BaseModel):
19
+ company_name: str
20
+
21
+ @app.post("/api/fetch_news")
22
+ async def fetch_news(request: CompanyRequest):
23
+ try:
24
+ company_name = request.company_name.strip().lower()
25
+ if not company_name:
26
+ raise HTTPException(status_code=400, detail="Company name is required")
27
+
28
+ logger.info(f"Fetching news for {company_name}")
29
+ file_name = utils.fetch_and_save_news(company_name)
30
+ if not file_name:
31
+ logger.warning(f"No news found for {company_name}")
32
+ raise HTTPException(status_code=404, detail=f"No news found for {company_name}")
33
+
34
+ with open(file_name, "r", encoding="utf-8") as file:
35
+ content = file.read()
36
+
37
+ try:
38
+ news_data = json.loads(content) # Should work with updated utils.py
39
+ logger.info(f"Successfully parsed news data for {company_name}")
40
+ return news_data
41
+ except json.JSONDecodeError as e:
42
+ logger.error(f"JSON parsing failed: {str(e)}", exc_info=True)
43
+ raise HTTPException(status_code=500, detail=f"Error parsing JSON: {str(e)}")
44
+
45
+ except Exception as e:
46
+ logger.error(f"Error in fetch_news: {str(e)}", exc_info=True)
47
+ raise HTTPException(status_code=500, detail=f"Error fetching news: {str(e)}")
48
+
49
+ @app.post("/api/text_to_speech")
50
+ async def text_to_speech(request: CompanyRequest):
51
+ try:
52
+ company_name = request.company_name.strip().lower()
53
+ if not company_name:
54
+ raise HTTPException(status_code=400, detail="Company name is required")
55
+
56
+ file_name = f"{company_name}_news.txt"
57
+ try:
58
+ with open(file_name, "r", encoding="utf-8") as file:
59
+ news_data = json.load(file)
60
+ sentiment_text = news_data.get("Final Sentiment Analysis", "")
61
+ if not sentiment_text:
62
+ raise HTTPException(status_code=404, detail="Sentiment analysis not found")
63
+
64
+ hindi_text = translator.translate(sentiment_text)
65
+ tts = gTTS(text=hindi_text, lang='hi')
66
+ mp3_fp = io.BytesIO()
67
+ tts.write_to_fp(mp3_fp)
68
+ mp3_fp.seek(0)
69
+ audio_base64 = base64.b64encode(mp3_fp.read()).decode('utf-8')
70
+ return {"text": hindi_text, "audio_base64": audio_base64}
71
+ except FileNotFoundError:
72
+ raise HTTPException(status_code=404, detail=f"News file for {company_name} not found")
73
+ except Exception as e:
74
+ logger.error(f"Error in text_to_speech: {str(e)}", exc_info=True)
75
+ raise HTTPException(status_code=500, detail=f"Error generating speech: {str(e)}")
76
+
77
+ @app.get("/api/health")
78
+ async def health_check():
79
+ return {"status": "healthy"}
80
+
81
+ if __name__ == "__main__":
82
+ uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True)
app.py CHANGED
@@ -1,80 +1,123 @@
1
  import streamlit as st
2
- import utils # Import functions from utils.py
3
- import os
4
- from gtts import gTTS
5
- import tempfile
6
- import re
7
- from deep_translator import GoogleTranslator
8
 
9
  st.title("News Summarization and Text-to-Speech Application")
10
 
11
- # User input for company name
12
  company_name = st.text_input("Enter the company name:", "").strip().lower()
13
 
14
  if st.button("Fetch News"):
15
  if company_name:
16
- # Run news extraction and analysis
17
- st.write(f"Fetching news for **{company_name}**...")
18
-
19
- # Call the function from utils.py
20
- file_name = utils.fetch_and_save_news(company_name)
21
-
22
- if os.path.exists(file_name):
23
- st.success(f"Data saved in **{file_name}**")
 
24
 
25
- # Read the file to display content
26
- with open(file_name, "r", encoding="utf-8") as file:
27
- text_content = file.read()
28
- st.text_area("News Analysis", text_content, height=400)
29
-
30
- # Provide a download button for text file
31
- with open(file_name, "rb") as file:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  st.download_button(
33
- label="Download Text File",
34
- data=file,
35
- file_name=file_name,
36
- mime="text/plain"
37
  )
38
-
39
- # Extract only the Final Sentiment Analysis line
40
- final_sentiment_line = ""
41
- with open(file_name, "r", encoding="utf-8") as file:
42
- content = file.read()
43
- # Use regular expression to find the Final Sentiment Analysis line
44
- match = re.search(r'"Final Sentiment Analysis": "([^"]+)"', content)
45
- if match:
46
- final_sentiment_line = match.group(1)
47
-
48
- if final_sentiment_line:
49
- st.subheader("Hindi Audio for Final Sentiment Analysis")
50
 
51
- try:
52
- # First translate the English text to Hindi using deep_translator
53
- translator = GoogleTranslator(source='en', target='hi')
54
- hindi_text = translator.translate(final_sentiment_line)
55
-
56
- # Create Hindi audio from the translated text
57
- tts = gTTS(text=hindi_text, lang='hi', slow=False)
58
-
59
- # Save the audio in a temporary file
60
- temp_audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
61
- tts.save(temp_audio_file.name)
62
-
63
-
64
- # Provide download button for the audio
65
- with open(temp_audio_file.name, "rb") as audio_file:
66
- audio_data = audio_file.read()
67
- st.download_button(
68
- label="Download Hindi Audio",
69
- data=audio_data,
70
- file_name=f"{company_name}_sentiment_hindi.mp3",
71
- mime="audio/mp3"
72
- )
73
- except Exception as e:
74
- st.error(f"Error generating Hindi audio: {str(e)}")
75
- else:
76
- st.warning("Could not find Final Sentiment Analysis in the text.")
77
- else:
78
- st.error("No relevant news articles found.")
 
 
79
  else:
80
  st.warning("Please enter a company name.")
 
1
  import streamlit as st
2
+ import requests
3
+ import json
4
+ import base64
5
+ import io
6
+
7
+ API_BASE_URL = "http://localhost:8000/api"
8
 
9
  st.title("News Summarization and Text-to-Speech Application")
10
 
 
11
  company_name = st.text_input("Enter the company name:", "").strip().lower()
12
 
13
  if st.button("Fetch News"):
14
  if company_name:
15
+ status = st.status("Fetching news...", expanded=True)
16
+ status.write(f"Fetching news for **{company_name}**...")
17
+ try:
18
+ response = requests.post(
19
+ f"{API_BASE_URL}/fetch_news",
20
+ json={"company_name": company_name},
21
+ timeout=120
22
+ )
23
+ response.raise_for_status()
24
 
25
+ news_data = response.json()
26
+ if not news_data or "Company" not in news_data:
27
+ status.update(label="No news found", state="error")
28
+ st.warning(f"No news found for {company_name}")
29
+ else:
30
+ status.update(label="News fetched successfully!", state="complete", expanded=False)
31
+
32
+ st.subheader(f"News Analysis for {news_data['Company']}")
33
+
34
+ # Articles section
35
+ st.subheader("Articles")
36
+ with st.expander("View Articles", expanded=False):
37
+ for i, article in enumerate(news_data['Articles']):
38
+ st.markdown(f"#### Article {i+1}: {article['Title']}")
39
+ st.markdown(f"**Summary:** {article['Summary']}")
40
+ st.markdown(f"**Sentiment:** {article['Sentiment']}")
41
+ st.markdown(f"**Topics:** {', '.join(article['Topics'])}")
42
+ st.divider()
43
+
44
+ # Sentiment Distribution
45
+ st.subheader("Sentiment Distribution")
46
+ sentiment_data = news_data['Comparative Sentiment Score']['Sentiment Distribution']
47
+ col1, col2, col3 = st.columns(3)
48
+ col1.metric("Positive", sentiment_data['Positive'])
49
+ col2.metric("Neutral", sentiment_data['Neutral'])
50
+ col3.metric("Negative", sentiment_data['Negative'])
51
+
52
+ # Topic Analysis
53
+ st.subheader("Topic Analysis")
54
+ with st.expander("View Topic Analysis", expanded=False):
55
+ st.markdown("**Common Topics:**")
56
+ st.write(", ".join(news_data['Topic Overlap']['Common Topics']))
57
+ for key, value in news_data['Topic Overlap'].items():
58
+ if key != "Common Topics":
59
+ st.markdown(f"**{key}:**")
60
+ st.write(", ".join(value))
61
+
62
+ # Coverage Differences
63
+ st.subheader("Coverage Differences")
64
+ with st.expander("View Comparative Analysis", expanded=False):
65
+ coverage_diff = news_data['Coverage Differences']
66
+ if isinstance(coverage_diff, str):
67
+ st.write(coverage_diff) # Fallback for error cases
68
+ else:
69
+ # Format line-by-line
70
+ formatted_text = '"Coverage Differences": [\n'
71
+ for i, item in enumerate(coverage_diff.get("Coverage Differences", [])):
72
+ formatted_text += "{\n"
73
+ formatted_text += f' "Comparison": "{item["Comparison"]}",\n'
74
+ formatted_text += f' "Impact": "{item["Impact"]}"\n'
75
+ formatted_text += "}" + (",\n" if i < len(coverage_diff["Coverage Differences"]) - 1 else "\n")
76
+ formatted_text += "]"
77
+ st.code(formatted_text, language="json")
78
+
79
+ # Final Sentiment Analysis
80
+ st.subheader("Final Sentiment Analysis")
81
+ st.info(news_data['Final Sentiment Analysis'])
82
+
83
+ # Download JSON
84
+ st.subheader("Download Data")
85
  st.download_button(
86
+ label="Download JSON File",
87
+ data=json.dumps(news_data, indent=4),
88
+ file_name=f"{company_name}_news.json",
89
+ mime="application/json"
90
  )
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
+ # Hindi Audio
93
+ st.subheader("Hindi Audio for Final Sentiment Analysis")
94
+ audio_response = requests.post(
95
+ f"{API_BASE_URL}/text_to_speech",
96
+ json={"company_name": company_name},
97
+ timeout=60
98
+ )
99
+ audio_response.raise_for_status()
100
+ audio_data = audio_response.json()
101
+ #st.markdown(f"**Hindi translation:**")
102
+ #st.text(audio_data["text"])
103
+ audio_bytes = base64.b64decode(audio_data["audio_base64"])
104
+ #st.audio(audio_bytes, format="audio/mp3")
105
+ st.download_button(
106
+ label="Download Hindi Audio",
107
+ data=audio_bytes,
108
+ file_name=f"{company_name}_sentiment_hindi.mp3",
109
+ mime="audio/mp3"
110
+ )
111
+
112
+ except requests.exceptions.RequestException as e:
113
+ status.update(label="Connection error", state="error")
114
+ st.error(f"Error connecting to API: {str(e)}")
115
+ st.info("Make sure the FastAPI backend is running on http://localhost:8000")
116
+ except json.JSONDecodeError:
117
+ status.update(label="Invalid response", state="error")
118
+ st.error("Received invalid data from the API")
119
+ except Exception as e:
120
+ status.update(label="Processing error", state="error")
121
+ st.error(f"Error processing news data: {str(e)}")
122
  else:
123
  st.warning("Please enter a company name.")
requirements.txt CHANGED
@@ -1,17 +1,15 @@
1
- requests
2
- beautifulsoup4
3
- transformers
4
- nltk
5
- streamlit
6
- gtts
7
- newspaper3k
8
- requests>=2.31.0
9
- beautifulsoup4>=4.12.3
10
- transformers>=4.35.2
11
  torch>=2.1.0 # Required by transformers for T5 model
12
- keybert>=0.7.0
13
- spacy>=3.7.2
14
- nltk>=3.8.1
15
- groq>=0.4.2
16
  sentencepiece>=0.1.99 # Required by T5Tokenizer
17
- deep_translator
 
 
 
 
 
 
1
+ requests==2.31.0
2
+ beautifulsoup4==4.12.3
3
+ transformers==4.38.2
 
 
 
 
 
 
 
4
  torch>=2.1.0 # Required by transformers for T5 model
5
+ keybert==0.8.4
6
+ spacy==3.7.4
7
+ nltk==3.8.1
8
+ groq==0.4.2
9
  sentencepiece>=0.1.99 # Required by T5Tokenizer
10
+ streamlit==1.36.0
11
+ fastapi==0.115.0
12
+ pydantic==2.6.4
13
+ uvicorn==0.30.6
14
+ deep-translator==1.11.4
15
+ gtts==2.5.3
utils.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import requests
2
  from bs4 import BeautifulSoup
3
  import time
@@ -62,10 +64,10 @@ rss_feeds = [
62
  "https://www.economist.com/business/rss.xml", # The Economist Business
63
  "https://www.ft.com/companies/financials/rss", # Financial Times Financials (Visa-relevant)
64
  "https://www.ft.com/rss/companies/technology", # Financial Times Tech Companies
65
- "https://feeds.a.dj.com/rss/WSJcomUSBusiness.xml", # Wall Street Journal US Business
66
- "https://www.forbes.com/money/feed/", # Forbes Money
67
- "https://www.reuters.com/arc/outboundfeeds/business/?outputType=xml", # Reuters Business
68
- "https://www.bloomberg.com/feed/podcasts/markets.xml", # Bloomberg Markets
69
  "https://finance.yahoo.com/news/rssindex", # Yahoo Finance News
70
  "https://www.nasdaq.com/feed/rssoutbound", # Nasdaq News
71
  "https://www.marketwatch.com/rss/topstories", # MarketWatch Top Stories
@@ -77,10 +79,11 @@ rss_feeds = [
77
  "https://www.theguardian.com/world/rss", # The Guardian World
78
  "https://feeds.npr.org/1001/rss.xml", # NPR News
79
  "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml", # NYT Home Page
80
- "https://apnews.com/hub/business?format=rss", # Associated Press Business
81
- "https://feeds.washingtonpost.com/rss/business", # Washington Post Business
82
  ]
83
 
 
84
  headers = {
85
  "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
86
  }
@@ -197,6 +200,7 @@ def get_coverage_differences(articles, company_name):
197
  }}
198
  ]
199
  }}
 
200
  """
201
  try:
202
  completion = client.chat.completions.create(
@@ -212,13 +216,14 @@ def get_coverage_differences(articles, company_name):
212
  for chunk in completion:
213
  coverage_diff += chunk.choices[0].delta.content or ""
214
 
215
- text = coverage_diff.strip()
216
  pattern = r'```json\s*([\s\S]*?)\s*```'
217
  match = re.search(pattern, text)
218
 
219
  if match:
220
- json_str = match.group(1)
221
  try:
 
222
  json_dict = json.loads(json_str)
223
  json_dict = json.dumps(json_dict, indent=4)
224
  return json_dict
@@ -229,6 +234,8 @@ def get_coverage_differences(articles, company_name):
229
  except Exception as e:
230
  return f"Error in Groq API call: {str(e)}"
231
 
 
 
232
  def similarity_based_common_topics(processed_articles, similarity_threshold=0.8, min_articles=2):
233
  keyword_clusters = defaultdict(list)
234
  for article in processed_articles:
@@ -297,7 +304,6 @@ def comparative_analysis(processed_articles, company_name):
297
  deduplicated_unique.add(topic)
298
  unique_topics[f"Unique Topics in Article {idx+1}"] = deduplicated_unique
299
  final_sentiment = max(sentiment_summary, key=sentiment_summary.get)
300
-
301
  # Add stock growth expectation based on sentiment
302
  if final_sentiment == "Positive":
303
  sentiment_statement = (f"{company_name}’s latest news coverage is mostly {final_sentiment.lower()}. "
@@ -318,51 +324,88 @@ def fetch_and_save_news(company_name):
318
  if not company_name:
319
  print("❌ Error: Company name is required")
320
  return None
321
- file_name = f"{company_name}_news.txt"
 
322
  articles = []
323
- article_count = 0
324
- article_limit = 10
325
- print(f"πŸš€ Starting parallel fetching for company: {company_name}...")
326
  article_queue = queue.Queue()
327
  article_limit_reached = threading.Event()
328
- with concurrent.futures.ThreadPoolExecutor(max_workers=10) as fetch_executor:
329
- fetch_futures = [fetch_executor.submit(fetch_articles_from_rss, url, company_name, article_queue, article_limit_reached)
330
- for url in rss_feeds]
331
- with concurrent.futures.ThreadPoolExecutor(max_workers=5) as process_executor:
 
 
 
 
 
 
 
 
 
 
 
 
332
  processing_futures = []
333
- while article_count < article_limit and (not article_queue.empty() or not all(f.done() for f in fetch_futures)):
 
 
334
  try:
335
- article_data = article_queue.get(timeout=0.1)
336
- future = process_executor.submit(process_article_content, article_data)
 
 
 
 
 
 
337
  processing_futures.append(future)
 
338
  except queue.Empty:
339
- continue
 
 
 
 
 
340
  for future in concurrent.futures.as_completed(processing_futures):
341
- if article_count >= article_limit:
342
- article_limit_reached.set()
343
- break
344
  result = future.result()
345
  if result:
346
  articles.append(result)
347
- article_count += 1
348
- print(f"πŸ“Š Processed {article_count}/{article_limit} articles")
349
- if article_count >= article_limit:
 
350
  article_limit_reached.set()
351
- print(f"βœ… Reached article limit of {article_limit}. Stopping search.")
352
  break
 
 
353
  articles = articles[:article_limit]
354
  if not articles:
355
- print(f"❌ No relevant articles found for company: {company_name}")
356
  return None
 
357
  print(f"βœ… Saving {len(articles)} articles to {file_name}")
358
  analysis_result = comparative_analysis(articles, company_name)
359
  coverage_differences = get_coverage_differences(articles, company_name)
 
 
 
 
 
 
 
 
 
360
  sentiment_distribution = {"Positive": 0, "Negative": 0, "Neutral": 0}
361
  for article in articles:
362
  sentiment_distribution[article["sentiment"]] += 1
 
363
  formatted_articles = [{"Title": article["title"], "Summary": article["summary"],
364
  "Sentiment": article["sentiment"], "Topics": article["keywords"].split(", ")}
365
  for article in articles]
 
366
  output_data = {
367
  "Company": company_name,
368
  "Articles": formatted_articles,
@@ -374,34 +417,11 @@ def fetch_and_save_news(company_name):
374
  },
375
  "Final Sentiment Analysis": analysis_result['Final Sentiment Analysis']
376
  }
 
377
  with open(file_name, "w", encoding="utf-8") as file:
378
- file.write(f'"Company": "{output_data["Company"]}",\n')
379
- file.write('"Articles": [\n')
380
- for i, article in enumerate(output_data["Articles"]):
381
- file.write('{\n')
382
- file.write(f'"Title": "{article["Title"]}",\n')
383
- file.write(f'"Summary": "{article["Summary"]}",\n')
384
- file.write(f'"Sentiment": "{article["Sentiment"]}",\n')
385
- file.write(f'"Topics": {article["Topics"]}\n')
386
- file.write('}' + (',\n' if i < len(output_data["Articles"]) - 1 else '\n'))
387
- file.write('],\n')
388
- file.write('"Comparative Sentiment Score": {\n')
389
- file.write('"Sentiment Distribution": {\n')
390
- for i, (sentiment, count) in enumerate(output_data["Comparative Sentiment Score"]["Sentiment Distribution"].items()):
391
- file.write(f'"{sentiment}": {count}' + (',' if i < 2 else '') + '\n')
392
- file.write('}\n')
393
- file.write('},\n')
394
- file.write(f'{output_data["Coverage Differences"]},\n')
395
- file.write('"Topic Overlap": {\n')
396
- file.write(f'"Common Topics": {output_data["Topic Overlap"]["Common Topics"]},\n')
397
- for i, (key, value) in enumerate([(k, v) for k, v in output_data["Topic Overlap"].items() if k != "Common Topics"]):
398
- file.write(f'"{key}": {value}' + (',\n' if i < len(output_data["Topic Overlap"]) - 2 else '\n'))
399
- file.write('},\n')
400
- file.write(f'"Final Sentiment Analysis": "{output_data["Final Sentiment Analysis"]}"\n')
401
- print("\nOutput format:")
402
- with open(file_name, "r", encoding="utf-8") as file:
403
- print(file.read())
404
- print("βœ… File saved successfully!")
405
  return file_name
406
 
407
  if __name__ == "__main__":
 
1
+ # utils.py
2
+
3
  import requests
4
  from bs4 import BeautifulSoup
5
  import time
 
64
  "https://www.economist.com/business/rss.xml", # The Economist Business
65
  "https://www.ft.com/companies/financials/rss", # Financial Times Financials (Visa-relevant)
66
  "https://www.ft.com/rss/companies/technology", # Financial Times Tech Companies
67
+ "https://feeds.a.dj.com/rss/WSJcomUSBusiness.xml", # Wall Street Journal US Business (updated URL)
68
+ "https://www.forbes.com/money/feed/", # Forbes Money (updated URL)
69
+ "https://www.reuters.com/arc/outboundfeeds/business/?outputType=xml", # Reuters Business (updated URL)
70
+ "https://www.bloomberg.com/feed/podcasts/markets.xml", # Bloomberg Markets (updated URL)
71
  "https://finance.yahoo.com/news/rssindex", # Yahoo Finance News
72
  "https://www.nasdaq.com/feed/rssoutbound", # Nasdaq News
73
  "https://www.marketwatch.com/rss/topstories", # MarketWatch Top Stories
 
79
  "https://www.theguardian.com/world/rss", # The Guardian World
80
  "https://feeds.npr.org/1001/rss.xml", # NPR News
81
  "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml", # NYT Home Page
82
+ "https://apnews.com/hub/business?format=rss", # Associated Press Business (updated URL)
83
+ "https://feeds.washingtonpost.com/rss/business", # Washington Post Business (updated URL)
84
  ]
85
 
86
+
87
  headers = {
88
  "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
89
  }
 
200
  }}
201
  ]
202
  }}
203
+
204
  """
205
  try:
206
  completion = client.chat.completions.create(
 
216
  for chunk in completion:
217
  coverage_diff += chunk.choices[0].delta.content or ""
218
 
219
+ text = coverage_diff.strip() # Fixed: removed space between 'text' and '='
220
  pattern = r'```json\s*([\s\S]*?)\s*```'
221
  match = re.search(pattern, text)
222
 
223
  if match:
224
+ json_str = match.group(1) # Get the content between the markers
225
  try:
226
+ # Parse the JSON to verify it's valid and return as dictionary
227
  json_dict = json.loads(json_str)
228
  json_dict = json.dumps(json_dict, indent=4)
229
  return json_dict
 
234
  except Exception as e:
235
  return f"Error in Groq API call: {str(e)}"
236
 
237
+
238
+
239
  def similarity_based_common_topics(processed_articles, similarity_threshold=0.8, min_articles=2):
240
  keyword_clusters = defaultdict(list)
241
  for article in processed_articles:
 
304
  deduplicated_unique.add(topic)
305
  unique_topics[f"Unique Topics in Article {idx+1}"] = deduplicated_unique
306
  final_sentiment = max(sentiment_summary, key=sentiment_summary.get)
 
307
  # Add stock growth expectation based on sentiment
308
  if final_sentiment == "Positive":
309
  sentiment_statement = (f"{company_name}’s latest news coverage is mostly {final_sentiment.lower()}. "
 
324
  if not company_name:
325
  print("❌ Error: Company name is required")
326
  return None
327
+
328
+ file_name = f"{company_name}_news.json"
329
  articles = []
330
+ article_limit = 10 # Set desired article limit
 
 
331
  article_queue = queue.Queue()
332
  article_limit_reached = threading.Event()
333
+
334
+ print(f"πŸš€ Starting parallel fetching for {company_name}...")
335
+
336
+ # Use all RSS feeds for comprehensive search
337
+ with concurrent.futures.ThreadPoolExecutor(max_workers=20) as fetch_executor:
338
+ # Submit all RSS feed fetch tasks
339
+ fetch_futures = [fetch_executor.submit(
340
+ fetch_articles_from_rss,
341
+ url,
342
+ company_name,
343
+ article_queue,
344
+ article_limit_reached
345
+ ) for url in rss_feeds]
346
+
347
+ # Process articles concurrently
348
+ with concurrent.futures.ThreadPoolExecutor(max_workers=10) as process_executor:
349
  processing_futures = []
350
+
351
+ # Dynamic article processing loop
352
+ while len(articles) < article_limit:
353
  try:
354
+ # Get article with timeout
355
+ article_data = article_queue.get(timeout=2)
356
+
357
+ # Submit for processing
358
+ future = process_executor.submit(
359
+ process_article_content,
360
+ article_data
361
+ )
362
  processing_futures.append(future)
363
+
364
  except queue.Empty:
365
+ # Check if we should continue waiting
366
+ if all(f.done() for f in fetch_futures):
367
+ print("⚠️ All feeds processed before reaching article limit")
368
+ break
369
+
370
+ # Process completed articles
371
  for future in concurrent.futures.as_completed(processing_futures):
 
 
 
372
  result = future.result()
373
  if result:
374
  articles.append(result)
375
+ print(f"πŸ“Š Collected {len(articles)}/{article_limit} articles")
376
+
377
+ # Exit immediately when limit reached
378
+ if len(articles) >= article_limit:
379
  article_limit_reached.set()
380
+ print(f"βœ… Reached {article_limit} articles. Stopping all operations.")
381
  break
382
+
383
+ # Final article processing
384
  articles = articles[:article_limit]
385
  if not articles:
386
+ print(f"❌ No relevant articles found for {company_name}")
387
  return None
388
+
389
  print(f"βœ… Saving {len(articles)} articles to {file_name}")
390
  analysis_result = comparative_analysis(articles, company_name)
391
  coverage_differences = get_coverage_differences(articles, company_name)
392
+
393
+ # Parse coverage_differences if it’s a string
394
+ if isinstance(coverage_differences, str):
395
+ try:
396
+ coverage_differences = json.loads(coverage_differences)
397
+ except json.JSONDecodeError as e:
398
+ print(f"❌ Failed to parse Coverage Differences: {e}")
399
+ coverage_differences = {"Coverage Differences": []}
400
+
401
  sentiment_distribution = {"Positive": 0, "Negative": 0, "Neutral": 0}
402
  for article in articles:
403
  sentiment_distribution[article["sentiment"]] += 1
404
+
405
  formatted_articles = [{"Title": article["title"], "Summary": article["summary"],
406
  "Sentiment": article["sentiment"], "Topics": article["keywords"].split(", ")}
407
  for article in articles]
408
+
409
  output_data = {
410
  "Company": company_name,
411
  "Articles": formatted_articles,
 
417
  },
418
  "Final Sentiment Analysis": analysis_result['Final Sentiment Analysis']
419
  }
420
+
421
  with open(file_name, "w", encoding="utf-8") as file:
422
+ json.dump(output_data, file, indent=4, ensure_ascii=False)
423
+
424
+ print(f"βœ… File saved successfully as JSON: {file_name}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
  return file_name
426
 
427
  if __name__ == "__main__":