Datasets:
Upload scripts/create_dataset.py with huggingface_hub
Browse files- scripts/create_dataset.py +92 -0
scripts/create_dataset.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Create HuggingFace dataset from scraped news articles.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import json
from collections import Counter
from pathlib import Path

from datasets import Dataset, DatasetDict
|
| 8 |
+
|
| 9 |
+
# Input: directory tree containing the scraped-article JSON files.
DATA_DIR = Path(__file__).parent.parent / "data"
# Output: where the assembled HuggingFace dataset is written.
OUTPUT_DIR = Path(__file__).parent.parent / "dataset"
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def load_articles(data_dir: Path) -> list[dict]:
    """Load every article from the JSON files under *data_dir* (recursive).

    Each file may hold either a single article object or a list of
    articles; both are flattened into one list.

    Args:
        data_dir: Root directory to search for ``*.json`` files.

    Returns:
        All articles found, in a deterministic (sorted-path) order.

    Raises:
        json.JSONDecodeError: If any file contains invalid JSON.
    """
    articles: list[dict] = []
    # sorted(): Path.glob yields files in filesystem-dependent order,
    # which would make the article list — and the seeded train/test
    # split built from it downstream — non-reproducible across runs.
    for json_file in sorted(data_dir.glob("**/*.json")):
        with open(json_file, "r", encoding="utf-8") as f:
            data = json.load(f)
        if isinstance(data, list):
            articles.extend(data)
        else:
            articles.append(data)
    return articles
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def main():
    """Build the UVN-1 HuggingFace dataset from scraped articles.

    Loads every article under ``DATA_DIR``, normalizes each record to a
    fixed schema, splits 90/10 into train/test, saves the result under
    ``OUTPUT_DIR / "UVN-1"``, and prints summary statistics.

    Raises:
        SystemExit: If no articles are found under ``DATA_DIR``.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    all_articles = load_articles(DATA_DIR)
    print(f"Loaded {len(all_articles)} articles total")

    # train_test_split on an empty dataset fails with an opaque error;
    # fail fast with a clear message instead.
    if not all_articles:
        raise SystemExit(f"No articles found under {DATA_DIR}")

    # Normalize every article to the fixed dataset schema; missing
    # fields default to "" so the Arrow schema stays consistent.
    fields = (
        "source",
        "url",
        "category",
        "content",
        "title",
        "description",
        "publish_date",
    )
    dataset_records = [
        {field: article.get(field, "") for field in fields}
        for article in all_articles
    ]

    # Create HuggingFace dataset
    dataset = Dataset.from_list(dataset_records)

    # Split into train/test (90/10); fixed seed keeps the split reproducible.
    split_dataset = dataset.train_test_split(test_size=0.1, seed=42)

    dataset_dict = DatasetDict({
        "train": split_dataset["train"],
        "test": split_dataset["test"],
    })

    # Save dataset
    dataset_dict.save_to_disk(OUTPUT_DIR / "UVN-1")

    # Print statistics
    print("\n=== Dataset Statistics ===")
    print(f"Train samples: {len(dataset_dict['train'])}")
    print(f"Test samples: {len(dataset_dict['test'])}")

    _print_distribution("Category Distribution", dataset_records, "category")
    _print_distribution("Source Distribution", dataset_records, "source")

    print(f"\nDataset saved to: {OUTPUT_DIR / 'UVN-1'}")


def _print_distribution(title: str, records: list[dict], key: str) -> None:
    """Print a frequency table of *key* values in *records*, most common first."""
    print(f"\n=== {title} ===")
    counts = Counter(record[key] for record in records)
    for value, count in counts.most_common():
        print(f"  {value}: {count}")
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# Script entry point: build and save the dataset when run directly.
if __name__ == "__main__":
    main()
|