# anfsorani.py — article scraper for anfsorani.com (repo: horami)
# Uploaded by razhan, commit f438ea1 (verified)
import trafilatura
import requests
import time
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import csv
import json
import pandas as pd
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
import threading
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class ArticleScraper:
    """Scrape article bodies from a paginated anfsorani.com section into sorani.csv.

    Listing pages are fetched in parallel to collect article URLs, then the
    articles themselves are fetched and their main text is extracted with
    trafilatura. Extracted texts are appended to ``sorani.csv`` (single
    ``articles`` column), which is de-duplicated at the end of a run.
    """

    def __init__(self, base_url):
        """Set up the shared HTTP session and the output CSV.

        base_url: section listing URL, e.g. "https://anfsorani.com/witar".
        """
        self.base_url = base_url
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.articles = []
        # Serializes appends to the CSV across scraper threads.
        self.csv_lock = threading.Lock()
        self.session = self._create_session()
        self._ensure_csv_header()

    def _ensure_csv_header(self):
        """Create sorani.csv with an 'articles' header row if it does not exist.

        verify_and_clean_csv() addresses the column by name; without a header
        row pandas treated the first scraped article as the header and the
        'articles' lookup failed with a KeyError.
        """
        try:
            # 'x' mode fails when the file already exists, so re-runs keep
            # appending to existing data instead of clobbering it.
            with open('sorani.csv', 'x', encoding='utf-8', newline='') as f:
                csv.writer(f).writerow(['articles'])
        except FileExistsError:
            pass

    def _create_session(self):
        """Create a requests session with retries and a large connection pool."""
        session = requests.Session()
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            # Retry on rate limiting and transient server errors.
            status_forcelist=[429, 500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy,
                              pool_connections=100, pool_maxsize=100)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        session.headers.update(self.headers)
        return session

    def get_article_urls_from_page(self, page_num):
        """Return the absolute article URLs listed on one listing page.

        Returns an empty list on any error so one failed page does not
        abort the whole run.
        """
        try:
            url = f"{self.base_url}?page={page_num}"
            response = self.session.get(url, timeout=30)
            response.raise_for_status()
            # lxml parses noticeably faster than the default html.parser.
            soup = BeautifulSoup(response.content, 'lxml')
            # CSS selector matching the site's listing markup.
            tracks = soup.select('ul.listing li.track div.track-holder a[href]')
            article_urls = [urljoin(self.base_url, a['href']) for a in tracks]
            print(f"Found {len(article_urls)} articles on page {page_num}")
            return article_urls
        except Exception as e:
            print(f"Error getting articles from page {page_num}: {str(e)}")
            return []

    def scrape_article(self, url):
        """Download one article, extract its text and append it to the CSV.

        Returns True if text was extracted and written, False otherwise.
        """
        try:
            print(f"Scraping article: {url}")
            response = self.session.get(url, timeout=30)
            # Fail fast on HTTP errors instead of feeding an error page
            # to the extractor (the original ignored the status code).
            response.raise_for_status()
            downloaded = response.text
            if downloaded:
                content = trafilatura.extract(downloaded,
                                              include_links=False,
                                              include_images=False,
                                              include_tables=False,
                                              include_formatting=False,
                                              with_metadata=False,
                                              output_format='txt')
                if content:
                    cleaned_content = content.strip()
                    # The lock keeps rows from different threads from
                    # interleaving inside the shared CSV file.
                    with self.csv_lock:
                        with open('sorani.csv', 'a', encoding='utf-8', newline='') as f:
                            writer = csv.writer(f)
                            writer.writerow([cleaned_content])
                    return True
            print(f"No content found for: {url}")
            return False
        except Exception as e:
            print(f"Error scraping article {url}: {str(e)}")
            return False

    def scrape_urls_batch(self, urls):
        """Scrape every URL in *urls*; return the number of successes."""
        return sum(1 for url in urls if self.scrape_article(url))

    def scrape_all_pages(self, start_page, end_page):
        """Scrape every article on pages start_page..end_page (inclusive)."""
        total_articles = 0
        successful_articles = 0
        all_urls = []
        print(f"Starting scraping from page {start_page} to {end_page}")
        # Phase 1: gather all article URLs in parallel.
        print("Gathering all article URLs...")
        with ThreadPoolExecutor(max_workers=10) as executor:
            future_to_page = {
                executor.submit(self.get_article_urls_from_page, page_num): page_num
                for page_num in range(start_page, end_page + 1)
            }
            for future in as_completed(future_to_page):
                page_urls = future.result()
                all_urls.extend(page_urls)
                total_articles += len(page_urls)
        print(f"Found total of {len(all_urls)} articles. Starting content scraping...")
        # Phase 2: scrape article bodies in parallel; batching bounds the
        # number of queued futures for better memory behavior.
        batch_size = 50
        url_batches = [all_urls[i:i + batch_size]
                       for i in range(0, len(all_urls), batch_size)]
        with ThreadPoolExecutor(max_workers=20) as executor:
            futures = [executor.submit(self.scrape_urls_batch, batch)
                       for batch in url_batches]
            for future in as_completed(futures):
                try:
                    # Aggregate per-batch success counts (the original
                    # computed successful_articles but never updated it).
                    successful_articles += future.result()
                except Exception as e:
                    print(f"Batch processing error: {str(e)}")
        print(f"\nScraping completed!")
        print(f"Total articles found: {total_articles}")
        print(f"Articles successfully scraped: {successful_articles}")
        # Verify and clean the CSV file.
        self.verify_and_clean_csv()

    def verify_and_clean_csv(self):
        """Remove duplicate and empty rows from sorani.csv in place."""
        try:
            # Read in chunks so very large scrape results fit in memory.
            chunk_size = 10000
            chunks = []
            for chunk in pd.read_csv('sorani.csv', chunksize=chunk_size):
                chunks.append(chunk)
            if not chunks:
                # Header-only file: pd.concat([]) would raise ValueError.
                print("CSV file has no data rows; nothing to clean.")
                return
            df = pd.concat(chunks)
            initial_rows = len(df)
            df.drop_duplicates(subset=['articles'], keep='first', inplace=True)
            df.dropna(subset=['articles'], inplace=True)
            df = df[df['articles'].str.strip() != '']
            # Rewrite the file, keeping the header for the next run.
            df.to_csv('sorani.csv', index=False, encoding='utf-8')
            print(f"\nCSV Cleaning Results:")
            print(f"Initial rows: {initial_rows}")
            print(f"Final rows after cleaning: {len(df)}")
        except Exception as e:
            print(f"Error during CSV cleaning: {str(e)}")
def main():
    """Entry point: scrape the first 20 pages of the chosen site section."""
    # Other sections that have been scraped previously (kept for reference):
    # base_url = "https://anfsorani.com/zhyawa-swwrya"
    # base_url = "https://anfsorani.com/zhhhaty-nawhast"
    # base_url = "https://anfsorani.com/جیهان"
    base_url = "https://anfsorani.com/witar"
    first_page, last_page = 1, 20
    ArticleScraper(base_url).scrape_all_pages(first_page, last_page)


if __name__ == "__main__":
    main()