|
|
""" |
|
|
Create HuggingFace dataset from scraped news articles. |
|
|
""" |
|
|
|
|
|
import json
from collections import Counter
from pathlib import Path

from datasets import Dataset, DatasetDict
|
|
|
|
|
# Repository-relative layout: raw scraped JSON articles are read from
# <repo>/data, the built HuggingFace dataset is written to <repo>/dataset.
DATA_DIR = Path(__file__).parent.parent / "data"


OUTPUT_DIR = Path(__file__).parent.parent / "dataset"
|
|
|
|
|
|
|
|
def load_articles(data_dir: Path) -> list[dict]:
    """Load articles from all JSON files under *data_dir*, recursively.

    Each ``*.json`` file may contain either a single article object or a
    list of article objects; both shapes are flattened into one list.

    Args:
        data_dir: Directory searched recursively for ``*.json`` files.

    Returns:
        All article dicts found. A file with invalid JSON (or one that
        cannot be read) is skipped with a warning rather than aborting
        the whole run.
    """
    articles: list[dict] = []
    for json_file in data_dir.glob("**/*.json"):
        try:
            with open(json_file, "r", encoding="utf-8") as f:
                data = json.load(f)
        except (json.JSONDecodeError, OSError) as err:
            # One corrupt/unreadable file should not lose every other file.
            print(f"Warning: skipping {json_file}: {err}")
            continue
        if isinstance(data, list):
            articles.extend(data)
        else:
            articles.append(data)
    return articles
|
|
|
|
|
|
|
|
def _print_distribution(title: str, values: list[str]) -> None:
    """Print a '=== title ===' header and per-value counts, most common first."""
    print(f"\n=== {title} ===")
    for value, count in Counter(values).most_common():
        print(f"  {value}: {count}")


def main():
    """Build and save the UVN-1 HuggingFace dataset from scraped articles.

    Loads every JSON article under DATA_DIR, normalizes each record to a
    fixed schema, makes a deterministic 90/10 train/test split, saves the
    DatasetDict to OUTPUT_DIR/UVN-1, and prints summary statistics.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    all_articles = load_articles(DATA_DIR)
    print(f"Loaded {len(all_articles)} articles total")

    if not all_articles:
        # train_test_split raises on an empty dataset; bail out cleanly.
        print("No articles found; nothing to do.")
        return

    # Normalize every article to a fixed schema; missing fields become "".
    fields = ("source", "url", "category", "content", "title",
              "description", "publish_date")
    dataset_records = [
        {field: article.get(field, "") for field in fields}
        for article in all_articles
    ]

    dataset = Dataset.from_list(dataset_records)

    # Fixed seed keeps the 90/10 split reproducible across runs.
    split_dataset = dataset.train_test_split(test_size=0.1, seed=42)
    dataset_dict = DatasetDict({
        "train": split_dataset["train"],
        "test": split_dataset["test"],
    })

    dataset_dict.save_to_disk(OUTPUT_DIR / "UVN-1")

    print("\n=== Dataset Statistics ===")
    print(f"Train samples: {len(dataset_dict['train'])}")
    print(f"Test samples: {len(dataset_dict['test'])}")

    _print_distribution("Category Distribution",
                        [record["category"] for record in dataset_records])
    _print_distribution("Source Distribution",
                        [record["source"] for record in dataset_records])

    print(f"\nDataset saved to: {OUTPUT_DIR / 'UVN-1'}")
|
|
|
|
|
|
|
|
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":


    main()
|
|
|