# TrickGPT / scraper.py
# Uploaded by fariasultana via huggingface_hub (commit b30de99, verified)
import asyncio
import json
from playwright.async_api import async_playwright
async def scrape_trickbd(
    max_articles: int = 15,
    output_file: str = "/home/ubuntu/trickgpt_dataset.jsonl",
) -> None:
    """
    Scrape articles from trickbd.com, format them into a dataset for AI model
    training, and save the data to a JSONL file.

    Parameters
    ----------
    max_articles : int
        Maximum number of article pages to visit (default 15, a small sample).
    output_file : str
        Path of the JSONL file to write, one JSON object per line.

    Notes
    -----
    Requires Playwright with the Chromium browser installed. Failures on
    individual articles are logged and skipped (best-effort scraping); a
    failure to load the homepage aborts without writing any file.
    """
    articles = []
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            # The desktop theme exposes the selectors used below (h1, .entry-content).
            base_url = "https://trickbd.com/?theme_change=desktop"
            try:
                await page.goto(base_url, timeout=120000)  # generous timeout: site can be slow
            except Exception as e:
                print(f"Failed to load the main page: {e}")
                return

            # Extract unique article links, skipping author/category index pages.
            links = await page.eval_on_selector_all(
                "h3 a", "elements => elements.map(e => e.href)"
            )
            unique_links = list({
                link for link in links
                if "/author/" not in link and "/category/" not in link
            })
            print(f"Found {len(unique_links)} unique article links.")

            # Scrape a limited number of articles for the dataset sample.
            for link in unique_links[:max_articles]:
                try:
                    print(f"Navigating to: {link}")
                    await page.goto(link, timeout=120000)
                    await page.wait_for_load_state("domcontentloaded", timeout=60000)
                    # Fallback chains: not every TrickBD page uses the same markup.
                    title = await page.evaluate(
                        "() => document.querySelector('h1')?.innerText || document.title"
                    )
                    content = await page.evaluate("""() => {
                        const selectors = ['.entry-content', '.post-content', 'article'];
                        for (const s of selectors) {
                            const el = document.querySelector(s);
                            if (el) return el.innerText;
                        }
                        return "";
                    }""")
                    category = await page.evaluate(
                        "() => document.querySelector('.cat-links')?.innerText || 'Uncategorized'"
                    )
                    if content:
                        articles.append({
                            "instruction": f"Summarize the key points of the following article from TrickBD titled '{title}'.",
                            "input": content.strip(),
                            "output": "",  # To be filled by the model
                            "metadata": {
                                "title": title,
                                "category": category,
                                "url": link,
                                "source": "trickbd.com",
                            },
                        })
                        print(f"Successfully scraped: {title[:60]}...")
                    else:
                        print(f"Could not extract content from: {link}")
                except Exception as e:
                    # Best-effort: log and skip a failing article, keep the rest.
                    print(f"Error scraping {link}: {e}")
        finally:
            # Always release the browser process, even if link extraction raised.
            await browser.close()

    # Save the scraped data to a JSONL file (one JSON object per line).
    with open(output_file, "w", encoding="utf-8") as f:
        for article in articles:
            f.write(json.dumps(article, ensure_ascii=False) + "\n")
    print(f"Scraping completed. Saved {len(articles)} articles to {output_file}")
# Script entry point: run the async scraper to completion.
if __name__ == "__main__":
    asyncio.run(scrape_trickbd())