Commit
·
16270cd
1
Parent(s):
7d6ee37
Add ASCII trend charts to temporal analysis
Browse files- Add ascii-graph dependency for text-based visualizations
- Show both high_edu_rate and avg_edu_score charts by year
- Embed charts in README dataset card
- Display charts in console output
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
- finepdfs-stats.py +235 -245
finepdfs-stats.py
CHANGED
|
@@ -4,72 +4,35 @@
|
|
| 4 |
# "polars>=1.31.0",
|
| 5 |
# "huggingface-hub",
|
| 6 |
# "datasets",
|
|
|
|
| 7 |
# ]
|
| 8 |
# ///
|
| 9 |
"""
|
| 10 |
-
|
| 11 |
|
| 12 |
-
|
| 13 |
-
API calls from 139 → 1 for datasets like finepdfs-edu, enabling efficient
|
| 14 |
-
streaming aggregation without downloading the full dataset.
|
| 15 |
|
| 16 |
-
|
| 17 |
-
- HuggingFaceFW/finepdfs-edu (49.5M rows, 350B tokens) - educational subset
|
| 18 |
-
- HuggingFaceFW/finepdfs (476M rows, 3T tokens) - full dataset
|
| 19 |
-
|
| 20 |
-
This script computes:
|
| 21 |
-
- Per-language statistics (doc count, token totals, avg edu scores)
|
| 22 |
-
- Per-extractor statistics
|
| 23 |
-
- Per-dump statistics
|
| 24 |
-
- Global summary metrics
|
| 25 |
-
|
| 26 |
-
The result is a small summary DataFrame that can be uploaded as a new dataset.
|
| 27 |
|
| 28 |
Example usage:
|
| 29 |
-
#
|
| 30 |
-
uv run finepdfs-stats.py --list-languages
|
| 31 |
-
|
| 32 |
-
# Compute stats for English (default: finepdfs-edu)
|
| 33 |
uv run finepdfs-stats.py
|
| 34 |
|
| 35 |
-
#
|
| 36 |
-
uv run finepdfs-stats.py --
|
| 37 |
-
|
| 38 |
-
# Use full finepdfs dataset (476M rows)
|
| 39 |
-
uv run finepdfs-stats.py --source-dataset HuggingFaceFW/finepdfs
|
| 40 |
-
|
| 41 |
-
# Show query plan before execution
|
| 42 |
-
uv run finepdfs-stats.py --show-plan --limit 1000
|
| 43 |
|
| 44 |
-
#
|
| 45 |
-
uv run finepdfs-stats.py --limit 10000
|
| 46 |
|
| 47 |
-
# Save results
|
| 48 |
-
uv run finepdfs-stats.py --output-repo username/finepdfs-
|
| 49 |
|
| 50 |
-
# Run on HF Jobs
|
| 51 |
-
hf jobs uv run finepdfs-stats.py \\
|
| 52 |
-
-s HF_TOKEN \\
|
| 53 |
-
-e HF_XET_HIGH_PERFORMANCE=1 \\
|
| 54 |
-
-- --output-repo username/finepdfs-edu-stats
|
| 55 |
-
|
| 56 |
-
# Or run from a URL
|
| 57 |
hf jobs uv run \\
|
| 58 |
-s HF_TOKEN \\
|
| 59 |
-e HF_XET_HIGH_PERFORMANCE=1 \\
|
| 60 |
-
|
| 61 |
-
-- --output-repo username/
|
| 62 |
-
|
| 63 |
-
Why Polars scan_parquet?
|
| 64 |
-
- Lazy evaluation: builds query plan without loading data
|
| 65 |
-
- Streaming execution: processes data in chunks, constant memory
|
| 66 |
-
- Native HF Hub support: hf://datasets/... paths just work
|
| 67 |
-
- Optimized API calls: PR #25521 reduced API calls 10-100x for HF datasets
|
| 68 |
-
|
| 69 |
-
Performance tips:
|
| 70 |
-
- Set HF_XET_HIGH_PERFORMANCE=1 to maximize network/disk utilization
|
| 71 |
-
- Use --limit for quick tests before running on full dataset
|
| 72 |
-
- Use --show-plan to see Polars query optimization (projection pushdown)
|
| 73 |
"""
|
| 74 |
|
| 75 |
import argparse
|
|
@@ -80,6 +43,7 @@ import time
|
|
| 80 |
from pathlib import Path
|
| 81 |
|
| 82 |
import polars as pl
|
|
|
|
| 83 |
from datasets import Dataset
|
| 84 |
from huggingface_hub import HfApi, create_repo, list_repo_tree, login
|
| 85 |
|
|
@@ -124,165 +88,173 @@ def list_available_languages(dataset_id: str) -> list[str]:
|
|
| 124 |
return list(COMMON_LANGUAGES.keys())
|
| 125 |
|
| 126 |
|
| 127 |
-
def
|
| 128 |
-
"""
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
.
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
)
|
| 146 |
|
| 147 |
|
| 148 |
-
def
|
| 149 |
-
"""
|
| 150 |
return (
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
pl.len().alias("doc_count"),
|
| 154 |
-
pl.col("token_count").sum().alias("total_tokens"),
|
| 155 |
-
pl.col("token_count").mean().alias("avg_tokens"),
|
| 156 |
-
pl.col("is_truncated").sum().alias("truncated_count"),
|
| 157 |
-
pl.col("page_average_lid_score").mean().alias("avg_lid_score"),
|
| 158 |
)
|
| 159 |
-
.
|
| 160 |
-
.
|
|
|
|
|
|
|
| 161 |
)
|
| 162 |
|
| 163 |
|
| 164 |
-
def
|
| 165 |
-
"""
|
| 166 |
-
|
| 167 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 168 |
.agg(
|
| 169 |
-
pl.
|
| 170 |
-
pl.col("
|
| 171 |
-
pl.col("
|
| 172 |
)
|
| 173 |
-
.sort("
|
| 174 |
-
.collect(engine="streaming")
|
| 175 |
)
|
| 176 |
|
|
|
|
| 177 |
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
|
|
|
|
|
|
| 194 |
|
| 195 |
|
| 196 |
def create_readme(
|
| 197 |
args,
|
| 198 |
global_stats: pl.DataFrame,
|
| 199 |
-
|
|
|
|
|
|
|
| 200 |
) -> str:
|
| 201 |
"""Create README content for the stats dataset."""
|
| 202 |
stats = global_stats.to_dicts()[0]
|
| 203 |
-
|
| 204 |
-
|
| 205 |
|
| 206 |
-
#
|
| 207 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
|
| 209 |
return f"""---
|
| 210 |
tags:
|
| 211 |
- statistics
|
| 212 |
- polars
|
| 213 |
- finepdfs-edu
|
|
|
|
| 214 |
license: odc-by
|
| 215 |
---
|
| 216 |
|
| 217 |
-
#
|
| 218 |
|
| 219 |
-
|
| 220 |
|
| 221 |
-
##
|
| 222 |
|
| 223 |
-
|
|
|
|
|
|
|
| 224 |
|
| 225 |
-
|
| 226 |
-
|------|------|
|
| 227 |
-
{timing_rows}
|
| 228 |
-
| **Total** | **{total_time:.2f}s** |
|
| 229 |
|
| 230 |
-
|
| 231 |
-
|
|
|
|
|
|
|
| 232 |
|
| 233 |
-
##
|
| 234 |
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
|
| 239 |
-
|
| 240 |
-
uv run finepdfs-stats.py --lang {args.lang} --output-repo {args.output_repo or "username/stats"}
|
| 241 |
-
```
|
| 242 |
-
|
| 243 |
-
## Global Summary
|
| 244 |
|
| 245 |
| Metric | Value |
|
| 246 |
|--------|-------|
|
| 247 |
-
|
|
| 248 |
-
| Total Documents | {
|
| 249 |
-
| Total Tokens | {stats.get("total_tokens",
|
| 250 |
-
|
|
| 251 |
-
|
|
| 252 |
-
|
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
```python
|
| 266 |
-
from datasets import load_dataset
|
| 267 |
-
|
| 268 |
-
# Load all configs
|
| 269 |
-
global_stats = load_dataset("{args.output_repo or "username/stats"}", "global_stats")
|
| 270 |
-
lang_stats = load_dataset("{args.output_repo or "username/stats"}", "language_stats")
|
| 271 |
-
extractor_stats = load_dataset("{args.output_repo or "username/stats"}", "extractor_stats")
|
| 272 |
-
dump_stats = load_dataset("{args.output_repo or "username/stats"}", "dump_stats")
|
| 273 |
```
|
| 274 |
|
| 275 |
## Source
|
| 276 |
|
| 277 |
- **Dataset**: [{args.source_dataset}](https://huggingface.co/datasets/{args.source_dataset})
|
| 278 |
-
- **
|
| 279 |
-
- **Script**: [finepdfs-stats.py](https://huggingface.co/datasets/uv-scripts/data-stats)
|
| 280 |
"""
|
| 281 |
|
| 282 |
|
| 283 |
def main():
|
| 284 |
parser = argparse.ArgumentParser(
|
| 285 |
-
description="
|
| 286 |
formatter_class=argparse.RawDescriptionHelpFormatter,
|
| 287 |
epilog=__doc__,
|
| 288 |
)
|
|
@@ -291,26 +263,32 @@ def main():
|
|
| 291 |
"--source-dataset",
|
| 292 |
type=str,
|
| 293 |
default="HuggingFaceFW/finepdfs-edu",
|
| 294 |
-
help="Source dataset: HuggingFaceFW/finepdfs-edu
|
| 295 |
)
|
| 296 |
|
| 297 |
parser.add_argument(
|
| 298 |
-
"--
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 299 |
action="store_true",
|
| 300 |
-
help="
|
| 301 |
)
|
| 302 |
|
| 303 |
parser.add_argument(
|
| 304 |
-
"--
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
help="Language+script code to process, e.g., eng_Latn, fra_Latn, zho_Hans (default: eng_Latn)",
|
| 308 |
)
|
| 309 |
|
| 310 |
parser.add_argument(
|
| 311 |
"--list-languages",
|
| 312 |
action="store_true",
|
| 313 |
-
help="List available
|
| 314 |
)
|
| 315 |
|
| 316 |
parser.add_argument(
|
|
@@ -329,7 +307,7 @@ def main():
|
|
| 329 |
"--output-dir",
|
| 330 |
type=str,
|
| 331 |
default="./stats_output",
|
| 332 |
-
help="Local directory for output files
|
| 333 |
)
|
| 334 |
|
| 335 |
parser.add_argument(
|
|
@@ -367,11 +345,17 @@ def main():
|
|
| 367 |
sys.exit(0)
|
| 368 |
|
| 369 |
# Build the parquet path
|
| 370 |
-
|
| 371 |
-
f"hf://datasets/{args.source_dataset}/data
|
| 372 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
logger.info(f"Scanning: {source_path}")
|
| 374 |
-
logger.info(f"
|
| 375 |
|
| 376 |
# Create lazy frame - this doesn't load any data yet!
|
| 377 |
logger.info("Creating lazy query plan...")
|
|
@@ -401,60 +385,64 @@ def main():
|
|
| 401 |
output_dir = Path(args.output_dir)
|
| 402 |
output_dir.mkdir(parents=True, exist_ok=True)
|
| 403 |
|
| 404 |
-
#
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
# Compute statistics (streaming execution happens here)
|
| 408 |
-
logger.info("Computing global statistics...")
|
| 409 |
start = time.perf_counter()
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 414 |
global_stats.write_parquet(output_dir / "global_stats.parquet")
|
|
|
|
| 415 |
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
df = pl.scan_parquet(source_path)
|
| 420 |
-
if args.limit:
|
| 421 |
-
df = df.head(args.limit)
|
| 422 |
-
lang_stats = compute_language_stats(df)
|
| 423 |
-
timings["Language stats"] = time.perf_counter() - start
|
| 424 |
-
print(f"\nLanguage Statistics ({len(lang_stats)} languages):")
|
| 425 |
-
print(lang_stats.head(20))
|
| 426 |
-
lang_stats.write_parquet(output_dir / "language_stats.parquet")
|
| 427 |
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
if args.limit:
|
| 432 |
-
df = df.head(args.limit)
|
| 433 |
-
extractor_stats = compute_extractor_stats(df)
|
| 434 |
-
timings["Extractor stats"] = time.perf_counter() - start
|
| 435 |
-
print("\nExtractor Statistics:")
|
| 436 |
-
print(extractor_stats)
|
| 437 |
-
extractor_stats.write_parquet(output_dir / "extractor_stats.parquet")
|
| 438 |
|
| 439 |
-
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
|
| 447 |
-
print(
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
#
|
| 451 |
-
|
| 452 |
-
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
print(
|
| 456 |
-
|
| 457 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 458 |
|
| 459 |
logger.info(f"Results saved to: {output_dir}")
|
| 460 |
|
|
@@ -481,26 +469,27 @@ def main():
|
|
| 481 |
else:
|
| 482 |
raise
|
| 483 |
|
| 484 |
-
# Upload each
|
| 485 |
-
configs =
|
| 486 |
-
"global_stats"
|
| 487 |
-
"
|
| 488 |
-
|
| 489 |
-
"dump_stats": dump_stats,
|
| 490 |
-
}
|
| 491 |
|
| 492 |
-
for config_name,
|
| 493 |
logger.info(f"Uploading {config_name}...")
|
| 494 |
-
ds = Dataset.from_polars(
|
| 495 |
ds.push_to_hub(
|
| 496 |
args.output_repo,
|
| 497 |
config_name=config_name,
|
| 498 |
token=hf_token,
|
| 499 |
private=args.private,
|
| 500 |
)
|
|
|
|
| 501 |
|
| 502 |
-
#
|
| 503 |
-
readme_content = create_readme(
|
|
|
|
|
|
|
| 504 |
api.upload_file(
|
| 505 |
path_or_fileobj=readme_content.encode(),
|
| 506 |
path_in_repo="README.md",
|
|
@@ -516,27 +505,28 @@ def main():
|
|
| 516 |
|
| 517 |
if __name__ == "__main__":
|
| 518 |
if len(sys.argv) == 1:
|
| 519 |
-
print("
|
| 520 |
-
print("=" *
|
| 521 |
-
print("\
|
| 522 |
-
print("using Polars streaming - no
|
| 523 |
print("Example commands:\n")
|
| 524 |
-
print("#
|
| 525 |
-
print("uv run finepdfs-stats.py --list-languages\n")
|
| 526 |
-
print("# Quick test with 10k rows:")
|
| 527 |
print("uv run finepdfs-stats.py --limit 10000\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 528 |
print("# Show query plan (see Polars optimization):")
|
| 529 |
print("uv run finepdfs-stats.py --show-plan --limit 1000\n")
|
| 530 |
-
print("# Process English (default: finepdfs-edu):")
|
| 531 |
-
print("uv run finepdfs-stats.py\n")
|
| 532 |
-
print("# Use full finepdfs dataset (476M rows):")
|
| 533 |
-
print("uv run finepdfs-stats.py --source-dataset HuggingFaceFW/finepdfs\n")
|
| 534 |
print("# Save results to HF Hub:")
|
| 535 |
-
print("uv run finepdfs-stats.py --output-repo username/
|
| 536 |
-
print("# Run on HF Jobs
|
| 537 |
-
print("hf jobs uv run
|
| 538 |
print(" -s HF_TOKEN \\")
|
| 539 |
print(" -e HF_XET_HIGH_PERFORMANCE=1 \\")
|
|
|
|
|
|
|
|
|
|
| 540 |
print(" -- --output-repo username/stats")
|
| 541 |
sys.exit(0)
|
| 542 |
|
|
|
|
| 4 |
# "polars>=1.31.0",
|
| 5 |
# "huggingface-hub",
|
| 6 |
# "datasets",
|
| 7 |
+
# "ascii-graph",
|
| 8 |
# ]
|
| 9 |
# ///
|
| 10 |
"""
|
| 11 |
+
Analyze educational quality trends across CommonCrawl dumps using Polars streaming.
|
| 12 |
|
| 13 |
+
Answers: "Is the web getting more educational over time?"
|
|
|
|
|
|
|
| 14 |
|
| 15 |
+
Demonstrates Polars HF Hub integration - process 23M+ docs without downloading 300GB.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
Example usage:
|
| 18 |
+
# Analyze English PDFs (default)
|
|
|
|
|
|
|
|
|
|
| 19 |
uv run finepdfs-stats.py
|
| 20 |
|
| 21 |
+
# Analyze all 70+ languages
|
| 22 |
+
uv run finepdfs-stats.py --all-languages
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
+
# Quick test
|
| 25 |
+
uv run finepdfs-stats.py --limit 10000 --show-plan
|
| 26 |
|
| 27 |
+
# Save results to HF Hub
|
| 28 |
+
uv run finepdfs-stats.py --output-repo username/finepdfs-temporal-stats
|
| 29 |
|
| 30 |
+
# Run on HF Jobs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
hf jobs uv run \\
|
| 32 |
-s HF_TOKEN \\
|
| 33 |
-e HF_XET_HIGH_PERFORMANCE=1 \\
|
| 34 |
+
https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/finepdfs-stats.py \\
|
| 35 |
+
-- --output-repo username/stats
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
"""
|
| 37 |
|
| 38 |
import argparse
|
|
|
|
| 43 |
from pathlib import Path
|
| 44 |
|
| 45 |
import polars as pl
|
| 46 |
+
from ascii_graph import Pyasciigraph
|
| 47 |
from datasets import Dataset
|
| 48 |
from huggingface_hub import HfApi, create_repo, list_repo_tree, login
|
| 49 |
|
|
|
|
| 88 |
return list(COMMON_LANGUAGES.keys())
|
| 89 |
|
| 90 |
|
| 91 |
+
def compute_temporal_stats(df: pl.LazyFrame, output_path: Path) -> pl.DataFrame:
|
| 92 |
+
"""Single scan: compute stats grouped by dump for temporal analysis."""
|
| 93 |
+
query = df.group_by("dump").agg(
|
| 94 |
+
pl.len().alias("doc_count"),
|
| 95 |
+
pl.col("token_count").sum().alias("total_tokens"),
|
| 96 |
+
pl.col("fw_edu_scores").list.mean().mean().alias("avg_edu_score"),
|
| 97 |
+
(pl.col("fw_edu_scores").list.mean() >= 3).sum().alias("high_edu_count"),
|
| 98 |
+
)
|
| 99 |
+
query.sink_parquet(output_path, engine="streaming")
|
| 100 |
+
return pl.read_parquet(output_path)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def compute_global_stats(temporal: pl.DataFrame) -> pl.DataFrame:
|
| 104 |
+
"""Compute global stats from temporal breakdown."""
|
| 105 |
+
total = temporal["doc_count"].sum()
|
| 106 |
+
return pl.DataFrame(
|
| 107 |
+
{
|
| 108 |
+
"total_docs": [total],
|
| 109 |
+
"total_tokens": [temporal["total_tokens"].sum()],
|
| 110 |
+
"avg_edu_score": [
|
| 111 |
+
(temporal["avg_edu_score"] * temporal["doc_count"]).sum() / total
|
| 112 |
+
],
|
| 113 |
+
"high_edu_rate": [temporal["high_edu_count"].sum() / total],
|
| 114 |
+
"num_dumps": [len(temporal)],
|
| 115 |
+
}
|
| 116 |
)
|
| 117 |
|
| 118 |
|
| 119 |
+
def format_temporal_stats(temporal: pl.DataFrame) -> pl.DataFrame:
|
| 120 |
+
"""Format temporal stats with high_edu_rate, sorted chronologically."""
|
| 121 |
return (
|
| 122 |
+
temporal.with_columns(
|
| 123 |
+
(pl.col("high_edu_count") / pl.col("doc_count")).alias("high_edu_rate")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
)
|
| 125 |
+
.select(["dump", "doc_count", "avg_edu_score", "high_edu_rate"])
|
| 126 |
+
.sort(
|
| 127 |
+
"dump"
|
| 128 |
+
) # Chronological order (CC-MAIN-2017-xx comes before CC-MAIN-2024-xx)
|
| 129 |
)
|
| 130 |
|
| 131 |
|
| 132 |
+
def create_ascii_charts(temporal_stats: pl.DataFrame) -> str:
|
| 133 |
+
"""Create ASCII bar charts showing temporal trends."""
|
| 134 |
+
# Extract year from dump name (CC-MAIN-2024-42 -> 2024)
|
| 135 |
+
# Group by year and average the values for cleaner display
|
| 136 |
+
yearly = (
|
| 137 |
+
temporal_stats.with_columns(
|
| 138 |
+
pl.col("dump").str.extract(r"CC-MAIN-(\d{4})", 1).alias("year")
|
| 139 |
+
)
|
| 140 |
+
.group_by("year")
|
| 141 |
.agg(
|
| 142 |
+
pl.col("doc_count").sum(),
|
| 143 |
+
pl.col("avg_edu_score").mean(),
|
| 144 |
+
pl.col("high_edu_rate").mean(),
|
| 145 |
)
|
| 146 |
+
.sort("year")
|
|
|
|
| 147 |
)
|
| 148 |
|
| 149 |
+
lines = []
|
| 150 |
|
| 151 |
+
# High edu rate chart (more dramatic differences)
|
| 152 |
+
data_rate = [
|
| 153 |
+
(row["year"], row["high_edu_rate"] * 100)
|
| 154 |
+
for row in yearly.iter_rows(named=True)
|
| 155 |
+
]
|
| 156 |
+
graph = Pyasciigraph(line_length=60, float_format="{0:.1f}%")
|
| 157 |
+
lines.extend(graph.graph("High Educational Content (edu >= 3)", data_rate))
|
| 158 |
+
|
| 159 |
+
lines.append("")
|
| 160 |
+
|
| 161 |
+
# Avg edu score chart
|
| 162 |
+
data_score = [
|
| 163 |
+
(row["year"], row["avg_edu_score"]) for row in yearly.iter_rows(named=True)
|
| 164 |
+
]
|
| 165 |
+
graph2 = Pyasciigraph(line_length=60, float_format="{0:.2f}")
|
| 166 |
+
lines.extend(graph2.graph("Average Educational Score", data_score))
|
| 167 |
+
|
| 168 |
+
return "\n".join(lines)
|
| 169 |
|
| 170 |
|
| 171 |
def create_readme(
|
| 172 |
args,
|
| 173 |
global_stats: pl.DataFrame,
|
| 174 |
+
temporal_stats: pl.DataFrame,
|
| 175 |
+
scan_time: float,
|
| 176 |
+
ascii_charts: str,
|
| 177 |
) -> str:
|
| 178 |
"""Create README content for the stats dataset."""
|
| 179 |
stats = global_stats.to_dicts()[0]
|
| 180 |
+
total_docs = stats.get("total_docs", 0)
|
| 181 |
+
docs_per_sec = total_docs / scan_time if scan_time > 0 else 0
|
| 182 |
|
| 183 |
+
# Get first and last dumps for trend
|
| 184 |
+
first_dump = temporal_stats.head(1).to_dicts()[0]
|
| 185 |
+
last_dump = temporal_stats.tail(1).to_dicts()[0]
|
| 186 |
+
|
| 187 |
+
scope = (
|
| 188 |
+
"all languages"
|
| 189 |
+
if args.all_languages
|
| 190 |
+
else COMMON_LANGUAGES.get(args.lang, args.lang)
|
| 191 |
+
)
|
| 192 |
|
| 193 |
return f"""---
|
| 194 |
tags:
|
| 195 |
- statistics
|
| 196 |
- polars
|
| 197 |
- finepdfs-edu
|
| 198 |
+
- temporal-analysis
|
| 199 |
license: odc-by
|
| 200 |
---
|
| 201 |
|
| 202 |
+
# Is the Web Getting More Educational?
|
| 203 |
|
| 204 |
+
Temporal analysis of educational quality across {stats.get("num_dumps", 0)} CommonCrawl dumps.
|
| 205 |
|
| 206 |
+
## Trend
|
| 207 |
|
| 208 |
+
```
|
| 209 |
+
{ascii_charts}
|
| 210 |
+
```
|
| 211 |
|
| 212 |
+
## Key Finding
|
|
|
|
|
|
|
|
|
|
| 213 |
|
| 214 |
+
| Period | Avg Edu Score | High Edu Rate |
|
| 215 |
+
|--------|---------------|---------------|
|
| 216 |
+
| {first_dump["dump"]} | {first_dump["avg_edu_score"]:.3f} | {first_dump["high_edu_rate"] * 100:.1f}% |
|
| 217 |
+
| {last_dump["dump"]} | {last_dump["avg_edu_score"]:.3f} | {last_dump["high_edu_rate"] * 100:.1f}% |
|
| 218 |
|
| 219 |
+
## Performance
|
| 220 |
|
| 221 |
+
- **{total_docs:,} documents** processed in **{scan_time:.0f} seconds**
|
| 222 |
+
- **{docs_per_sec:,.0f} docs/sec** using Polars streaming
|
| 223 |
+
- Single scan, no download of 300GB+ dataset
|
| 224 |
|
| 225 |
+
## Summary
|
|
|
|
|
|
|
|
|
|
|
|
|
| 226 |
|
| 227 |
| Metric | Value |
|
| 228 |
|--------|-------|
|
| 229 |
+
| Scope | {scope} |
|
| 230 |
+
| Total Documents | {total_docs:,} |
|
| 231 |
+
| Total Tokens | {stats.get("total_tokens", 0):,} |
|
| 232 |
+
| Avg Edu Score | {stats.get("avg_edu_score", 0):.3f} |
|
| 233 |
+
| High Edu Rate | {stats.get("high_edu_rate", 0) * 100:.1f}% |
|
| 234 |
+
| CommonCrawl Dumps | {stats.get("num_dumps", 0)} |
|
| 235 |
+
|
| 236 |
+
## Files
|
| 237 |
+
|
| 238 |
+
- `global_stats` - Overall summary
|
| 239 |
+
- `temporal_stats` - Per-dump breakdown (sorted chronologically)
|
| 240 |
+
|
| 241 |
+
## Reproduce
|
| 242 |
+
|
| 243 |
+
```bash
|
| 244 |
+
uv run https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/finepdfs-stats.py \\
|
| 245 |
+
{"--all-languages" if args.all_languages else f"--lang {args.lang}"} --output-repo your-username/stats
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 246 |
```
|
| 247 |
|
| 248 |
## Source
|
| 249 |
|
| 250 |
- **Dataset**: [{args.source_dataset}](https://huggingface.co/datasets/{args.source_dataset})
|
| 251 |
+
- **Script**: [uv-scripts/dataset-stats](https://huggingface.co/datasets/uv-scripts/dataset-stats)
|
|
|
|
| 252 |
"""
|
| 253 |
|
| 254 |
|
| 255 |
def main():
|
| 256 |
parser = argparse.ArgumentParser(
|
| 257 |
+
description="Analyze educational quality trends across CommonCrawl dumps",
|
| 258 |
formatter_class=argparse.RawDescriptionHelpFormatter,
|
| 259 |
epilog=__doc__,
|
| 260 |
)
|
|
|
|
| 263 |
"--source-dataset",
|
| 264 |
type=str,
|
| 265 |
default="HuggingFaceFW/finepdfs-edu",
|
| 266 |
+
help="Source dataset (default: HuggingFaceFW/finepdfs-edu)",
|
| 267 |
)
|
| 268 |
|
| 269 |
parser.add_argument(
|
| 270 |
+
"--lang",
|
| 271 |
+
type=str,
|
| 272 |
+
default="eng_Latn",
|
| 273 |
+
help="Language+script code (default: eng_Latn)",
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
parser.add_argument(
|
| 277 |
+
"--all-languages",
|
| 278 |
action="store_true",
|
| 279 |
+
help="Analyze all languages (70+) instead of single language",
|
| 280 |
)
|
| 281 |
|
| 282 |
parser.add_argument(
|
| 283 |
+
"--show-plan",
|
| 284 |
+
action="store_true",
|
| 285 |
+
help="Show Polars query plan (demonstrates optimization)",
|
|
|
|
| 286 |
)
|
| 287 |
|
| 288 |
parser.add_argument(
|
| 289 |
"--list-languages",
|
| 290 |
action="store_true",
|
| 291 |
+
help="List available languages and exit",
|
| 292 |
)
|
| 293 |
|
| 294 |
parser.add_argument(
|
|
|
|
| 307 |
"--output-dir",
|
| 308 |
type=str,
|
| 309 |
default="./stats_output",
|
| 310 |
+
help="Local directory for output files",
|
| 311 |
)
|
| 312 |
|
| 313 |
parser.add_argument(
|
|
|
|
| 345 |
sys.exit(0)
|
| 346 |
|
| 347 |
# Build the parquet path
|
| 348 |
+
if args.all_languages:
|
| 349 |
+
source_path = f"hf://datasets/{args.source_dataset}/data/*/train/*.parquet"
|
| 350 |
+
scope_desc = "all languages"
|
| 351 |
+
else:
|
| 352 |
+
source_path = (
|
| 353 |
+
f"hf://datasets/{args.source_dataset}/data/{args.lang}/train/*.parquet"
|
| 354 |
+
)
|
| 355 |
+
scope_desc = f"{args.lang} ({COMMON_LANGUAGES.get(args.lang, 'unknown')})"
|
| 356 |
+
|
| 357 |
logger.info(f"Scanning: {source_path}")
|
| 358 |
+
logger.info(f"Scope: {scope_desc}")
|
| 359 |
|
| 360 |
# Create lazy frame - this doesn't load any data yet!
|
| 361 |
logger.info("Creating lazy query plan...")
|
|
|
|
| 385 |
output_dir = Path(args.output_dir)
|
| 386 |
output_dir.mkdir(parents=True, exist_ok=True)
|
| 387 |
|
| 388 |
+
# Single scan: compute temporal stats
|
| 389 |
+
logger.info("Computing temporal stats (single scan)...")
|
|
|
|
|
|
|
|
|
|
| 390 |
start = time.perf_counter()
|
| 391 |
+
temporal_path = output_dir / "temporal_stats.parquet"
|
| 392 |
+
temporal_raw = compute_temporal_stats(df, temporal_path)
|
| 393 |
+
scan_time = time.perf_counter() - start
|
| 394 |
+
logger.info(f"Scan complete in {scan_time:.2f}s - {len(temporal_raw)} dumps")
|
| 395 |
+
|
| 396 |
+
# Compute stats
|
| 397 |
+
global_stats = compute_global_stats(temporal_raw)
|
| 398 |
+
temporal_stats = format_temporal_stats(temporal_raw)
|
| 399 |
+
|
| 400 |
+
# Save
|
| 401 |
global_stats.write_parquet(output_dir / "global_stats.parquet")
|
| 402 |
+
temporal_stats.write_parquet(output_dir / "temporal_stats.parquet")
|
| 403 |
|
| 404 |
+
# Print results
|
| 405 |
+
total_docs = global_stats["total_docs"][0]
|
| 406 |
+
docs_per_sec = total_docs / scan_time if scan_time > 0 else 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 407 |
|
| 408 |
+
print("\n" + "=" * 70)
|
| 409 |
+
print("IS THE WEB GETTING MORE EDUCATIONAL?")
|
| 410 |
+
print("=" * 70)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
|
| 412 |
+
print(f"\nScope: {scope_desc}")
|
| 413 |
+
print(f"Dataset: {args.source_dataset}")
|
| 414 |
+
|
| 415 |
+
print("\n" + "-" * 70)
|
| 416 |
+
print("GLOBAL STATS")
|
| 417 |
+
print("-" * 70)
|
| 418 |
+
print(global_stats)
|
| 419 |
+
|
| 420 |
+
print("\n" + "-" * 70)
|
| 421 |
+
print(f"TEMPORAL TREND ({len(temporal_stats)} CommonCrawl dumps)")
|
| 422 |
+
print("-" * 70)
|
| 423 |
+
# Show first 5 and last 5
|
| 424 |
+
if len(temporal_stats) > 10:
|
| 425 |
+
print("Earliest dumps:")
|
| 426 |
+
print(temporal_stats.head(5))
|
| 427 |
+
print("\n...")
|
| 428 |
+
print("\nLatest dumps:")
|
| 429 |
+
print(temporal_stats.tail(5))
|
| 430 |
+
else:
|
| 431 |
+
print(temporal_stats)
|
| 432 |
+
|
| 433 |
+
# Create ASCII charts
|
| 434 |
+
ascii_charts = create_ascii_charts(temporal_stats)
|
| 435 |
+
print("\n" + "-" * 70)
|
| 436 |
+
print("TREND VISUALIZATION")
|
| 437 |
+
print("-" * 70)
|
| 438 |
+
print(ascii_charts)
|
| 439 |
+
|
| 440 |
+
print("\n" + "-" * 70)
|
| 441 |
+
print("PERFORMANCE")
|
| 442 |
+
print("-" * 70)
|
| 443 |
+
print(f"Scan time: {scan_time:.2f}s")
|
| 444 |
+
print(f"Documents: {total_docs:,}")
|
| 445 |
+
print(f"Throughput: {docs_per_sec:,.0f} docs/sec")
|
| 446 |
|
| 447 |
logger.info(f"Results saved to: {output_dir}")
|
| 448 |
|
|
|
|
| 469 |
else:
|
| 470 |
raise
|
| 471 |
|
| 472 |
+
# Upload each as a dataset config
|
| 473 |
+
configs = [
|
| 474 |
+
("global_stats", global_stats),
|
| 475 |
+
("temporal_stats", temporal_stats),
|
| 476 |
+
]
|
|
|
|
|
|
|
| 477 |
|
| 478 |
+
for config_name, stats_df in configs:
|
| 479 |
logger.info(f"Uploading {config_name}...")
|
| 480 |
+
ds = Dataset.from_polars(stats_df)
|
| 481 |
ds.push_to_hub(
|
| 482 |
args.output_repo,
|
| 483 |
config_name=config_name,
|
| 484 |
token=hf_token,
|
| 485 |
private=args.private,
|
| 486 |
)
|
| 487 |
+
time.sleep(1) # Avoid 409 conflicts
|
| 488 |
|
| 489 |
+
# Upload README
|
| 490 |
+
readme_content = create_readme(
|
| 491 |
+
args, global_stats, temporal_stats, scan_time, ascii_charts
|
| 492 |
+
)
|
| 493 |
api.upload_file(
|
| 494 |
path_or_fileobj=readme_content.encode(),
|
| 495 |
path_in_repo="README.md",
|
|
|
|
| 505 |
|
| 506 |
if __name__ == "__main__":
|
| 507 |
if len(sys.argv) == 1:
|
| 508 |
+
print("Is the Web Getting More Educational?")
|
| 509 |
+
print("=" * 40)
|
| 510 |
+
print("\nAnalyze educational quality trends across CommonCrawl dumps")
|
| 511 |
+
print("using Polars streaming - no download needed!\n")
|
| 512 |
print("Example commands:\n")
|
| 513 |
+
print("# Quick test:")
|
|
|
|
|
|
|
| 514 |
print("uv run finepdfs-stats.py --limit 10000\n")
|
| 515 |
+
print("# Analyze English PDFs:")
|
| 516 |
+
print("uv run finepdfs-stats.py\n")
|
| 517 |
+
print("# Analyze ALL 70+ languages:")
|
| 518 |
+
print("uv run finepdfs-stats.py --all-languages\n")
|
| 519 |
print("# Show query plan (see Polars optimization):")
|
| 520 |
print("uv run finepdfs-stats.py --show-plan --limit 1000\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 521 |
print("# Save results to HF Hub:")
|
| 522 |
+
print("uv run finepdfs-stats.py --output-repo username/temporal-stats\n")
|
| 523 |
+
print("# Run on HF Jobs:")
|
| 524 |
+
print("hf jobs uv run \\")
|
| 525 |
print(" -s HF_TOKEN \\")
|
| 526 |
print(" -e HF_XET_HIGH_PERFORMANCE=1 \\")
|
| 527 |
+
print(
|
| 528 |
+
" https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/finepdfs-stats.py \\"
|
| 529 |
+
)
|
| 530 |
print(" -- --output-repo username/stats")
|
| 531 |
sys.exit(0)
|
| 532 |
|