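"""Collect Bengali text data by scraping Bengali Wikipedia and the Prothom Alo
news site, then combine the results into a single JSON corpus under data/raw/.

Note: the URL patterns and CSS selectors used below (e.g. the 'story-content'
div on Prothom Alo) reflect the page structure at the time of writing and may
need updating if either site changes its markup.
"""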
import requests
from bs4 import BeautifulSoup
import time
import random
import json
from pathlib import Path
import logging
from urllib.parse import urljoin
# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class BengaliDataCollector:
    """Scrapes Bengali text from Wikipedia and Prothom Alo and saves it as JSON."""

    def __init__(self):
        # A desktop browser User-Agent; some sites block the default requests UA.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.output_dir = Path('data/raw')
        self.output_dir.mkdir(parents=True, exist_ok=True)
    def make_request(self, url, retries=3, delay=1):
        """Make HTTP request with retry logic and rate limiting"""
        for attempt in range(retries):
            try:
                time.sleep(delay + random.random())  # Rate limiting with jitter
                # Timeout so a hung connection cannot stall the crawl indefinitely
                response = requests.get(url, headers=self.headers, timeout=30)
                response.raise_for_status()
                return response
            except requests.RequestException as e:
                logger.warning(f"Attempt {attempt + 1} failed for {url}: {e}")
                if attempt == retries - 1:
                    logger.error(f"Failed to fetch {url} after {retries} attempts")
                    raise
                time.sleep(delay * 2 ** attempt)  # Exponential backoff
    def scrape_wikipedia(self):
        """Scrape Bengali text from Wikipedia"""
        url = "https://bn.wikipedia.org/wiki/প্রধান_পাতা"
        logger.info(f"Scraping Wikipedia: {url}")
        try:
            response = self.make_request(url)
            soup = BeautifulSoup(response.content, 'html.parser')
            # Get main content and featured articles
            content_div = soup.find('div', {'id': 'mw-content-text'})
            articles = []
            if content_div:
                # Extract article links, skipping namespace pages (hrefs
                # containing ':') and duplicate URLs
                seen = set()
                article_links = content_div.find_all('a', href=True)
                for link in article_links[:50]:  # Limit to first 50 links
                    if link['href'].startswith('/wiki/') and ':' not in link['href']:
                        article_url = urljoin('https://bn.wikipedia.org', link['href'])
                        if article_url in seen:
                            continue
                        seen.add(article_url)
                        try:
                            article_response = self.make_request(article_url)
                            article_soup = BeautifulSoup(article_response.content, 'html.parser')
                            # Extract article content
                            article_content = article_soup.find('div', {'id': 'mw-content-text'})
                            if article_content:
                                text = article_content.get_text(separator='\n', strip=True)
                                articles.append({
                                    'url': article_url,
                                    'content': text
                                })
                                logger.info(f"Successfully scraped article: {article_url}")
                        except Exception as e:
                            logger.error(f"Failed to scrape article {article_url}: {e}")
            # Save Wikipedia data
            with open(self.output_dir / 'wikipedia_data.json', 'w', encoding='utf-8') as f:
                json.dump(articles, f, ensure_ascii=False, indent=2)
            return len(articles)
        except Exception as e:
            logger.error(f"Failed to scrape Wikipedia: {e}")
            return 0
    def scrape_prothom_alo(self):
        """Scrape Bengali text from Prothom Alo"""
        base_url = "https://www.prothomalo.com"
        categories = ['bangladesh', 'international', 'opinion', 'science-technology']
        articles = []
        for category in categories:
            url = f"{base_url}/{category}"
            logger.info(f"Scraping Prothom Alo category: {category}")
            try:
                response = self.make_request(url)
                soup = BeautifulSoup(response.content, 'html.parser')
                # Find article links on the category page
                article_links = soup.find_all('a', href=True)
                for link in article_links[:10]:  # Limit to first 10 links per category
                    article_url = urljoin(base_url, link['href'])
                    # Keep only links that belong to the current category
                    if category in article_url:
                        try:
                            article_response = self.make_request(article_url)
                            article_soup = BeautifulSoup(article_response.content, 'html.parser')
                            # Extract article content
                            article_content = article_soup.find('div', {'class': 'story-content'})
                            if article_content:
                                text = article_content.get_text(separator='\n', strip=True)
                                articles.append({
                                    'url': article_url,
                                    'category': category,
                                    'content': text
                                })
                                logger.info(f"Successfully scraped article: {article_url}")
                        except Exception as e:
                            logger.error(f"Failed to scrape article {article_url}: {e}")
            except Exception as e:
                logger.error(f"Failed to scrape category {category}: {e}")
        # Save Prothom Alo data
        with open(self.output_dir / 'prothomalo_data.json', 'w', encoding='utf-8') as f:
            json.dump(articles, f, ensure_ascii=False, indent=2)
        return len(articles)
    def collect(self):
        """Main method to collect data from all sources"""
        logger.info("Starting data collection")
        wiki_count = self.scrape_wikipedia()
        logger.info(f"Collected {wiki_count} articles from Wikipedia")
        prothomalo_count = self.scrape_prothom_alo()
        logger.info(f"Collected {prothomalo_count} articles from Prothom Alo")
        # Combine and process the collected data
        self.process_collected_data()
        logger.info("Data collection completed")
    def process_collected_data(self):
        """Process and combine collected data"""
        try:
            # Read collected data
            with open(self.output_dir / 'wikipedia_data.json', 'r', encoding='utf-8') as f:
                wiki_data = json.load(f)
            with open(self.output_dir / 'prothomalo_data.json', 'r', encoding='utf-8') as f:
                news_data = json.load(f)
            # Combine and format data into a uniform record schema
            processed_data = []
            # Process Wikipedia articles
            for article in wiki_data:
                processed_data.append({
                    'text': article['content'],
                    'source': 'wikipedia',
                    'url': article['url']
                })
            # Process news articles
            for article in news_data:
                processed_data.append({
                    'text': article['content'],
                    'source': 'prothomalo',
                    'category': article.get('category', ''),
                    'url': article['url']
                })
            # Save processed data
            with open(self.output_dir / 'processed_data.json', 'w', encoding='utf-8') as f:
                json.dump(processed_data, f, ensure_ascii=False, indent=2)
            logger.info(f"Successfully processed {len(processed_data)} articles")
        except Exception as e:
            logger.error(f"Failed to process collected data: {e}")
            raise
if __name__ == "__main__":
    collector = BengaliDataCollector()
    collector.collect()
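# A quick sketch of how the combined output could be consumed downstream
# (assumes collect() has already run and written data/raw/processed_data.json):
#
#     import json
#     with open('data/raw/processed_data.json', encoding='utf-8') as f:
#         corpus = json.load(f)
#     bengali_texts = [record['text'] for record in corpus]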