davanstrien HF Staff Claude Opus 4.5 committed on
Commit
16270cd
·
1 Parent(s): 7d6ee37

Add ASCII trend charts to temporal analysis

Browse files

- Add ascii-graph dependency for text-based visualizations
- Show both high_edu_rate and avg_edu_score charts by year
- Embed charts in README dataset card
- Display charts in console output

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

Files changed (1) hide show
  1. finepdfs-stats.py +235 -245
finepdfs-stats.py CHANGED
@@ -4,72 +4,35 @@
4
  # "polars>=1.31.0",
5
  # "huggingface-hub",
6
  # "datasets",
 
7
  # ]
8
  # ///
9
  """
10
- Compute aggregate statistics on FinePDFs datasets using Polars streaming.
11
 
12
- Demonstrates the new Polars HF Hub integration (polars#25521) which reduces
13
- API calls from 139 → 1 for datasets like finepdfs-edu, enabling efficient
14
- streaming aggregation without downloading the full dataset.
15
 
16
- Supported datasets:
17
- - HuggingFaceFW/finepdfs-edu (49.5M rows, 350B tokens) - educational subset
18
- - HuggingFaceFW/finepdfs (476M rows, 3T tokens) - full dataset
19
-
20
- This script computes:
21
- - Per-language statistics (doc count, token totals, avg edu scores)
22
- - Per-extractor statistics
23
- - Per-dump statistics
24
- - Global summary metrics
25
-
26
- The result is a small summary DataFrame that can be uploaded as a new dataset.
27
 
28
  Example usage:
29
- # List available language+script combinations
30
- uv run finepdfs-stats.py --list-languages
31
-
32
- # Compute stats for English (default: finepdfs-edu)
33
  uv run finepdfs-stats.py
34
 
35
- # Process French documents
36
- uv run finepdfs-stats.py --lang fra_Latn
37
-
38
- # Use full finepdfs dataset (476M rows)
39
- uv run finepdfs-stats.py --source-dataset HuggingFaceFW/finepdfs
40
-
41
- # Show query plan before execution
42
- uv run finepdfs-stats.py --show-plan --limit 1000
43
 
44
- # Limit to first N rows for testing
45
- uv run finepdfs-stats.py --limit 10000
46
 
47
- # Save results and upload to HF
48
- uv run finepdfs-stats.py --output-repo username/finepdfs-edu-stats
49
 
50
- # Run on HF Jobs (CPU is sufficient, no GPU needed)
51
- hf jobs uv run finepdfs-stats.py \\
52
- -s HF_TOKEN \\
53
- -e HF_XET_HIGH_PERFORMANCE=1 \\
54
- -- --output-repo username/finepdfs-edu-stats
55
-
56
- # Or run from a URL
57
  hf jobs uv run \\
58
  -s HF_TOKEN \\
59
  -e HF_XET_HIGH_PERFORMANCE=1 \\
60
- "https://huggingface.co/datasets/uv-scripts/data-stats/raw/main/finepdfs-stats.py" \\
61
- -- --output-repo username/finepdfs-edu-stats
62
-
63
- Why Polars scan_parquet?
64
- - Lazy evaluation: builds query plan without loading data
65
- - Streaming execution: processes data in chunks, constant memory
66
- - Native HF Hub support: hf://datasets/... paths just work
67
- - Optimized API calls: PR #25521 reduced API calls 10-100x for HF datasets
68
-
69
- Performance tips:
70
- - Set HF_XET_HIGH_PERFORMANCE=1 to maximize network/disk utilization
71
- - Use --limit for quick tests before running on full dataset
72
- - Use --show-plan to see Polars query optimization (projection pushdown)
73
  """
74
 
75
  import argparse
@@ -80,6 +43,7 @@ import time
80
  from pathlib import Path
81
 
82
  import polars as pl
 
83
  from datasets import Dataset
84
  from huggingface_hub import HfApi, create_repo, list_repo_tree, login
85
 
@@ -124,165 +88,173 @@ def list_available_languages(dataset_id: str) -> list[str]:
124
  return list(COMMON_LANGUAGES.keys())
125
 
126
 
127
- def compute_language_stats(df: pl.LazyFrame) -> pl.DataFrame:
128
- """Compute per-language statistics."""
129
- return (
130
- df.group_by("language")
131
- .agg(
132
- pl.len().alias("doc_count"),
133
- pl.col("token_count").sum().alias("total_tokens"),
134
- pl.col("token_count").mean().alias("avg_tokens"),
135
- pl.col("token_count").median().alias("median_tokens"),
136
- pl.col("token_count").min().alias("min_tokens"),
137
- pl.col("token_count").max().alias("max_tokens"),
138
- pl.col("page_average_lid_score").mean().alias("avg_lid_score"),
139
- pl.col("is_truncated").sum().alias("truncated_count"),
140
- pl.col("minhash_cluster_size").mean().alias("avg_cluster_size"),
141
- pl.col("duplicate_count").sum().alias("total_duplicates"),
142
- )
143
- .sort("doc_count", descending=True)
144
- .collect(engine="streaming")
 
 
 
 
 
 
 
145
  )
146
 
147
 
148
- def compute_extractor_stats(df: pl.LazyFrame) -> pl.DataFrame:
149
- """Compute per-extractor statistics."""
150
  return (
151
- df.group_by("extractor")
152
- .agg(
153
- pl.len().alias("doc_count"),
154
- pl.col("token_count").sum().alias("total_tokens"),
155
- pl.col("token_count").mean().alias("avg_tokens"),
156
- pl.col("is_truncated").sum().alias("truncated_count"),
157
- pl.col("page_average_lid_score").mean().alias("avg_lid_score"),
158
  )
159
- .sort("doc_count", descending=True)
160
- .collect(engine="streaming")
 
 
161
  )
162
 
163
 
164
- def compute_dump_stats(df: pl.LazyFrame) -> pl.DataFrame:
165
- """Compute per-dump statistics."""
166
- return (
167
- df.group_by("dump")
 
 
 
 
 
168
  .agg(
169
- pl.len().alias("doc_count"),
170
- pl.col("token_count").sum().alias("total_tokens"),
171
- pl.col("token_count").mean().alias("avg_tokens"),
172
  )
173
- .sort("doc_count", descending=True)
174
- .collect(engine="streaming")
175
  )
176
 
 
177
 
178
- def compute_global_stats(df: pl.LazyFrame) -> pl.DataFrame:
179
- """Compute global summary statistics."""
180
- return df.select(
181
- pl.len().alias("total_docs"),
182
- pl.col("token_count").sum().alias("total_tokens"),
183
- pl.col("token_count").mean().alias("avg_tokens"),
184
- pl.col("token_count").median().alias("median_tokens"),
185
- pl.col("token_count").std().alias("std_tokens"),
186
- pl.col("is_truncated").sum().alias("truncated_docs"),
187
- pl.col("is_truncated").mean().alias("truncation_rate"),
188
- pl.col("minhash_cluster_size").mean().alias("avg_cluster_size"),
189
- pl.col("duplicate_count").sum().alias("total_duplicates"),
190
- pl.col("language").n_unique().alias("unique_languages"),
191
- pl.col("extractor").n_unique().alias("unique_extractors"),
192
- pl.col("dump").n_unique().alias("unique_dumps"),
193
- ).collect(engine="streaming")
 
 
194
 
195
 
196
  def create_readme(
197
  args,
198
  global_stats: pl.DataFrame,
199
- timings: dict[str, float],
 
 
200
  ) -> str:
201
  """Create README content for the stats dataset."""
202
  stats = global_stats.to_dicts()[0]
203
- lang_name = COMMON_LANGUAGES.get(args.lang, args.lang)
204
- total_time = sum(timings.values())
205
 
206
- # Format timings table
207
- timing_rows = "\n".join(f"| {name} | {t:.2f}s |" for name, t in timings.items())
 
 
 
 
 
 
 
208
 
209
  return f"""---
210
  tags:
211
  - statistics
212
  - polars
213
  - finepdfs-edu
 
214
  license: odc-by
215
  ---
216
 
217
- # Statistics for {args.source_dataset} ({lang_name})
218
 
219
- Aggregate statistics computed using Polars streaming on the [{args.source_dataset}](https://huggingface.co/datasets/{args.source_dataset}) dataset.
220
 
221
- ## Performance
222
 
223
- Processed **{stats.get("total_docs", 0):,} documents** in **{total_time:.2f} seconds**.
 
 
224
 
225
- | Step | Time |
226
- |------|------|
227
- {timing_rows}
228
- | **Total** | **{total_time:.2f}s** |
229
 
230
- > Speed comes from Polars only reading metadata columns (not the `text` column),
231
- > thanks to Parquet's columnar format and lazy evaluation.
 
 
232
 
233
- ## How This Was Generated
234
 
235
- This dataset demonstrates **Polars streaming aggregation** with HuggingFace Hub integration.
236
- Thanks to [polars#25521](https://github.com/pola-rs/polars/pull/25521), `scan_parquet`
237
- with `hf://` paths now uses far fewer API calls (139 → 1 for finepdfs-edu).
238
 
239
- ```bash
240
- uv run finepdfs-stats.py --lang {args.lang} --output-repo {args.output_repo or "username/stats"}
241
- ```
242
-
243
- ## Global Summary
244
 
245
  | Metric | Value |
246
  |--------|-------|
247
- | Language | {lang_name} (`{args.lang}`) |
248
- | Total Documents | {stats.get("total_docs", "N/A"):,} |
249
- | Total Tokens | {stats.get("total_tokens", "N/A"):,} |
250
- | Average Tokens/Doc | {stats.get("avg_tokens", 0):,.0f} |
251
- | Truncated Documents | {stats.get("truncated_docs", 0):,} ({stats.get("truncation_rate", 0) * 100:.1f}%) |
252
- | Unique Languages | {stats.get("unique_languages", "N/A")} |
253
- | Unique Extractors | {stats.get("unique_extractors", "N/A")} |
254
- | Unique Dumps | {stats.get("unique_dumps", "N/A")} |
255
-
256
- ## Configs
257
-
258
- - `global_stats` - Overall summary (1 row)
259
- - `language_stats` - Per-language aggregations
260
- - `extractor_stats` - Per-extractor aggregations
261
- - `dump_stats` - Per-dump aggregations
262
-
263
- ## Usage
264
-
265
- ```python
266
- from datasets import load_dataset
267
-
268
- # Load all configs
269
- global_stats = load_dataset("{args.output_repo or "username/stats"}", "global_stats")
270
- lang_stats = load_dataset("{args.output_repo or "username/stats"}", "language_stats")
271
- extractor_stats = load_dataset("{args.output_repo or "username/stats"}", "extractor_stats")
272
- dump_stats = load_dataset("{args.output_repo or "username/stats"}", "dump_stats")
273
  ```
274
 
275
  ## Source
276
 
277
  - **Dataset**: [{args.source_dataset}](https://huggingface.co/datasets/{args.source_dataset})
278
- - **Language**: {args.lang}
279
- - **Script**: [finepdfs-stats.py](https://huggingface.co/datasets/uv-scripts/data-stats)
280
  """
281
 
282
 
283
  def main():
284
  parser = argparse.ArgumentParser(
285
- description="Compute aggregate statistics on HF datasets using Polars streaming",
286
  formatter_class=argparse.RawDescriptionHelpFormatter,
287
  epilog=__doc__,
288
  )
@@ -291,26 +263,32 @@ def main():
291
  "--source-dataset",
292
  type=str,
293
  default="HuggingFaceFW/finepdfs-edu",
294
- help="Source dataset: HuggingFaceFW/finepdfs-edu (49.5M rows) or HuggingFaceFW/finepdfs (476M rows)",
295
  )
296
 
297
  parser.add_argument(
298
- "--show-plan",
 
 
 
 
 
 
 
299
  action="store_true",
300
- help="Show Polars query plan before execution (demonstrates optimization)",
301
  )
302
 
303
  parser.add_argument(
304
- "--lang",
305
- type=str,
306
- default="eng_Latn",
307
- help="Language+script code to process, e.g., eng_Latn, fra_Latn, zho_Hans (default: eng_Latn)",
308
  )
309
 
310
  parser.add_argument(
311
  "--list-languages",
312
  action="store_true",
313
- help="List available language+script codes and exit",
314
  )
315
 
316
  parser.add_argument(
@@ -329,7 +307,7 @@ def main():
329
  "--output-dir",
330
  type=str,
331
  default="./stats_output",
332
- help="Local directory for output files (default: ./stats_output)",
333
  )
334
 
335
  parser.add_argument(
@@ -367,11 +345,17 @@ def main():
367
  sys.exit(0)
368
 
369
  # Build the parquet path
370
- source_path = (
371
- f"hf://datasets/{args.source_dataset}/data/{args.lang}/train/*.parquet"
372
- )
 
 
 
 
 
 
373
  logger.info(f"Scanning: {source_path}")
374
- logger.info(f"Language: {args.lang} ({COMMON_LANGUAGES.get(args.lang, 'unknown')})")
375
 
376
  # Create lazy frame - this doesn't load any data yet!
377
  logger.info("Creating lazy query plan...")
@@ -401,60 +385,64 @@ def main():
401
  output_dir = Path(args.output_dir)
402
  output_dir.mkdir(parents=True, exist_ok=True)
403
 
404
- # Track timings
405
- timings: dict[str, float] = {}
406
-
407
- # Compute statistics (streaming execution happens here)
408
- logger.info("Computing global statistics...")
409
  start = time.perf_counter()
410
- global_stats = compute_global_stats(df)
411
- timings["Global stats"] = time.perf_counter() - start
412
- print("\nGlobal Statistics:")
413
- print(global_stats)
 
 
 
 
 
 
414
  global_stats.write_parquet(output_dir / "global_stats.parquet")
 
415
 
416
- logger.info("Computing per-language statistics...")
417
- start = time.perf_counter()
418
- # Need to re-scan since we consumed the lazy frame
419
- df = pl.scan_parquet(source_path)
420
- if args.limit:
421
- df = df.head(args.limit)
422
- lang_stats = compute_language_stats(df)
423
- timings["Language stats"] = time.perf_counter() - start
424
- print(f"\nLanguage Statistics ({len(lang_stats)} languages):")
425
- print(lang_stats.head(20))
426
- lang_stats.write_parquet(output_dir / "language_stats.parquet")
427
 
428
- logger.info("Computing per-extractor statistics...")
429
- start = time.perf_counter()
430
- df = pl.scan_parquet(source_path)
431
- if args.limit:
432
- df = df.head(args.limit)
433
- extractor_stats = compute_extractor_stats(df)
434
- timings["Extractor stats"] = time.perf_counter() - start
435
- print("\nExtractor Statistics:")
436
- print(extractor_stats)
437
- extractor_stats.write_parquet(output_dir / "extractor_stats.parquet")
438
 
439
- logger.info("Computing per-dump statistics...")
440
- start = time.perf_counter()
441
- df = pl.scan_parquet(source_path)
442
- if args.limit:
443
- df = df.head(args.limit)
444
- dump_stats = compute_dump_stats(df)
445
- timings["Dump stats"] = time.perf_counter() - start
446
- print(f"\nDump Statistics ({len(dump_stats)} dumps):")
447
- print(dump_stats.head(20))
448
- dump_stats.write_parquet(output_dir / "dump_stats.parquet")
449
-
450
- # Print timing summary
451
- total_time = sum(timings.values())
452
- print("\nTiming Summary:")
453
- print("-" * 30)
454
- for name, t in timings.items():
455
- print(f" {name}: {t:.2f}s")
456
- print("-" * 30)
457
- print(f" Total: {total_time:.2f}s")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
458
 
459
  logger.info(f"Results saved to: {output_dir}")
460
 
@@ -481,26 +469,27 @@ def main():
481
  else:
482
  raise
483
 
484
- # Upload each stats DataFrame as a separate config using datasets
485
- configs = {
486
- "global_stats": global_stats,
487
- "language_stats": lang_stats,
488
- "extractor_stats": extractor_stats,
489
- "dump_stats": dump_stats,
490
- }
491
 
492
- for config_name, df in configs.items():
493
  logger.info(f"Uploading {config_name}...")
494
- ds = Dataset.from_polars(df)
495
  ds.push_to_hub(
496
  args.output_repo,
497
  config_name=config_name,
498
  token=hf_token,
499
  private=args.private,
500
  )
 
501
 
502
- # Create and upload README
503
- readme_content = create_readme(args, global_stats, timings)
 
 
504
  api.upload_file(
505
  path_or_fileobj=readme_content.encode(),
506
  path_in_repo="README.md",
@@ -516,27 +505,28 @@ def main():
516
 
517
  if __name__ == "__main__":
518
  if len(sys.argv) == 1:
519
- print("FinePDFs Statistics - Polars Streaming Demo")
520
- print("=" * 45)
521
- print("\nCompute aggregate statistics on FinePDFs datasets")
522
- print("using Polars streaming - no need to download the full dataset!\n")
523
  print("Example commands:\n")
524
- print("# List available languages:")
525
- print("uv run finepdfs-stats.py --list-languages\n")
526
- print("# Quick test with 10k rows:")
527
  print("uv run finepdfs-stats.py --limit 10000\n")
 
 
 
 
528
  print("# Show query plan (see Polars optimization):")
529
  print("uv run finepdfs-stats.py --show-plan --limit 1000\n")
530
- print("# Process English (default: finepdfs-edu):")
531
- print("uv run finepdfs-stats.py\n")
532
- print("# Use full finepdfs dataset (476M rows):")
533
- print("uv run finepdfs-stats.py --source-dataset HuggingFaceFW/finepdfs\n")
534
  print("# Save results to HF Hub:")
535
- print("uv run finepdfs-stats.py --output-repo username/finepdfs-edu-stats\n")
536
- print("# Run on HF Jobs (CPU, with high-performance transfers):")
537
- print("hf jobs uv run finepdfs-stats.py \\")
538
  print(" -s HF_TOKEN \\")
539
  print(" -e HF_XET_HIGH_PERFORMANCE=1 \\")
 
 
 
540
  print(" -- --output-repo username/stats")
541
  sys.exit(0)
542
 
 
4
  # "polars>=1.31.0",
5
  # "huggingface-hub",
6
  # "datasets",
7
+ # "ascii-graph",
8
  # ]
9
  # ///
10
  """
11
+ Analyze educational quality trends across CommonCrawl dumps using Polars streaming.
12
 
13
+ Answers: "Is the web getting more educational over time?"
 
 
14
 
15
+ Demonstrates Polars HF Hub integration - process 23M+ docs without downloading 300GB.
 
 
 
 
 
 
 
 
 
 
16
 
17
  Example usage:
18
+ # Analyze English PDFs (default)
 
 
 
19
  uv run finepdfs-stats.py
20
 
21
+ # Analyze all 70+ languages
22
+ uv run finepdfs-stats.py --all-languages
 
 
 
 
 
 
23
 
24
+ # Quick test
25
+ uv run finepdfs-stats.py --limit 10000 --show-plan
26
 
27
+ # Save results to HF Hub
28
+ uv run finepdfs-stats.py --output-repo username/finepdfs-temporal-stats
29
 
30
+ # Run on HF Jobs
 
 
 
 
 
 
31
  hf jobs uv run \\
32
  -s HF_TOKEN \\
33
  -e HF_XET_HIGH_PERFORMANCE=1 \\
34
+ https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/finepdfs-stats.py \\
35
+ -- --output-repo username/stats
 
 
 
 
 
 
 
 
 
 
 
36
  """
37
 
38
  import argparse
 
43
  from pathlib import Path
44
 
45
  import polars as pl
46
+ from ascii_graph import Pyasciigraph
47
  from datasets import Dataset
48
  from huggingface_hub import HfApi, create_repo, list_repo_tree, login
49
 
 
88
  return list(COMMON_LANGUAGES.keys())
89
 
90
 
91
+ def compute_temporal_stats(df: pl.LazyFrame, output_path: Path) -> pl.DataFrame:
92
+ """Single scan: compute stats grouped by dump for temporal analysis."""
93
+ query = df.group_by("dump").agg(
94
+ pl.len().alias("doc_count"),
95
+ pl.col("token_count").sum().alias("total_tokens"),
96
+ pl.col("fw_edu_scores").list.mean().mean().alias("avg_edu_score"),
97
+ (pl.col("fw_edu_scores").list.mean() >= 3).sum().alias("high_edu_count"),
98
+ )
99
+ query.sink_parquet(output_path, engine="streaming")
100
+ return pl.read_parquet(output_path)
101
+
102
+
103
+ def compute_global_stats(temporal: pl.DataFrame) -> pl.DataFrame:
104
+ """Compute global stats from temporal breakdown."""
105
+ total = temporal["doc_count"].sum()
106
+ return pl.DataFrame(
107
+ {
108
+ "total_docs": [total],
109
+ "total_tokens": [temporal["total_tokens"].sum()],
110
+ "avg_edu_score": [
111
+ (temporal["avg_edu_score"] * temporal["doc_count"]).sum() / total
112
+ ],
113
+ "high_edu_rate": [temporal["high_edu_count"].sum() / total],
114
+ "num_dumps": [len(temporal)],
115
+ }
116
  )
117
 
118
 
119
+ def format_temporal_stats(temporal: pl.DataFrame) -> pl.DataFrame:
120
+ """Format temporal stats with high_edu_rate, sorted chronologically."""
121
  return (
122
+ temporal.with_columns(
123
+ (pl.col("high_edu_count") / pl.col("doc_count")).alias("high_edu_rate")
 
 
 
 
 
124
  )
125
+ .select(["dump", "doc_count", "avg_edu_score", "high_edu_rate"])
126
+ .sort(
127
+ "dump"
128
+ ) # Chronological order (CC-MAIN-2017-xx comes before CC-MAIN-2024-xx)
129
  )
130
 
131
 
132
+ def create_ascii_charts(temporal_stats: pl.DataFrame) -> str:
133
+ """Create ASCII bar charts showing temporal trends."""
134
+ # Extract year from dump name (CC-MAIN-2024-42 -> 2024)
135
+ # Group by year and average the values for cleaner display
136
+ yearly = (
137
+ temporal_stats.with_columns(
138
+ pl.col("dump").str.extract(r"CC-MAIN-(\d{4})", 1).alias("year")
139
+ )
140
+ .group_by("year")
141
  .agg(
142
+ pl.col("doc_count").sum(),
143
+ pl.col("avg_edu_score").mean(),
144
+ pl.col("high_edu_rate").mean(),
145
  )
146
+ .sort("year")
 
147
  )
148
 
149
+ lines = []
150
 
151
+ # High edu rate chart (more dramatic differences)
152
+ data_rate = [
153
+ (row["year"], row["high_edu_rate"] * 100)
154
+ for row in yearly.iter_rows(named=True)
155
+ ]
156
+ graph = Pyasciigraph(line_length=60, float_format="{0:.1f}%")
157
+ lines.extend(graph.graph("High Educational Content (edu >= 3)", data_rate))
158
+
159
+ lines.append("")
160
+
161
+ # Avg edu score chart
162
+ data_score = [
163
+ (row["year"], row["avg_edu_score"]) for row in yearly.iter_rows(named=True)
164
+ ]
165
+ graph2 = Pyasciigraph(line_length=60, float_format="{0:.2f}")
166
+ lines.extend(graph2.graph("Average Educational Score", data_score))
167
+
168
+ return "\n".join(lines)
169
 
170
 
171
  def create_readme(
172
  args,
173
  global_stats: pl.DataFrame,
174
+ temporal_stats: pl.DataFrame,
175
+ scan_time: float,
176
+ ascii_charts: str,
177
  ) -> str:
178
  """Create README content for the stats dataset."""
179
  stats = global_stats.to_dicts()[0]
180
+ total_docs = stats.get("total_docs", 0)
181
+ docs_per_sec = total_docs / scan_time if scan_time > 0 else 0
182
 
183
+ # Get first and last dumps for trend
184
+ first_dump = temporal_stats.head(1).to_dicts()[0]
185
+ last_dump = temporal_stats.tail(1).to_dicts()[0]
186
+
187
+ scope = (
188
+ "all languages"
189
+ if args.all_languages
190
+ else COMMON_LANGUAGES.get(args.lang, args.lang)
191
+ )
192
 
193
  return f"""---
194
  tags:
195
  - statistics
196
  - polars
197
  - finepdfs-edu
198
+ - temporal-analysis
199
  license: odc-by
200
  ---
201
 
202
+ # Is the Web Getting More Educational?
203
 
204
+ Temporal analysis of educational quality across {stats.get("num_dumps", 0)} CommonCrawl dumps.
205
 
206
+ ## Trend
207
 
208
+ ```
209
+ {ascii_charts}
210
+ ```
211
 
212
+ ## Key Finding
 
 
 
213
 
214
+ | Period | Avg Edu Score | High Edu Rate |
215
+ |--------|---------------|---------------|
216
+ | {first_dump["dump"]} | {first_dump["avg_edu_score"]:.3f} | {first_dump["high_edu_rate"] * 100:.1f}% |
217
+ | {last_dump["dump"]} | {last_dump["avg_edu_score"]:.3f} | {last_dump["high_edu_rate"] * 100:.1f}% |
218
 
219
+ ## Performance
220
 
221
+ - **{total_docs:,} documents** processed in **{scan_time:.0f} seconds**
222
+ - **{docs_per_sec:,.0f} docs/sec** using Polars streaming
223
+ - Single scan, no download of 300GB+ dataset
224
 
225
+ ## Summary
 
 
 
 
226
 
227
  | Metric | Value |
228
  |--------|-------|
229
+ | Scope | {scope} |
230
+ | Total Documents | {total_docs:,} |
231
+ | Total Tokens | {stats.get("total_tokens", 0):,} |
232
+ | Avg Edu Score | {stats.get("avg_edu_score", 0):.3f} |
233
+ | High Edu Rate | {stats.get("high_edu_rate", 0) * 100:.1f}% |
234
+ | CommonCrawl Dumps | {stats.get("num_dumps", 0)} |
235
+
236
+ ## Files
237
+
238
+ - `global_stats` - Overall summary
239
+ - `temporal_stats` - Per-dump breakdown (sorted chronologically)
240
+
241
+ ## Reproduce
242
+
243
+ ```bash
244
+ uv run https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/finepdfs-stats.py \\
245
+ {"--all-languages" if args.all_languages else f"--lang {args.lang}"} --output-repo your-username/stats
 
 
 
 
 
 
 
 
 
246
  ```
247
 
248
  ## Source
249
 
250
  - **Dataset**: [{args.source_dataset}](https://huggingface.co/datasets/{args.source_dataset})
251
+ - **Script**: [uv-scripts/dataset-stats](https://huggingface.co/datasets/uv-scripts/dataset-stats)
 
252
  """
253
 
254
 
255
  def main():
256
  parser = argparse.ArgumentParser(
257
+ description="Analyze educational quality trends across CommonCrawl dumps",
258
  formatter_class=argparse.RawDescriptionHelpFormatter,
259
  epilog=__doc__,
260
  )
 
263
  "--source-dataset",
264
  type=str,
265
  default="HuggingFaceFW/finepdfs-edu",
266
+ help="Source dataset (default: HuggingFaceFW/finepdfs-edu)",
267
  )
268
 
269
  parser.add_argument(
270
+ "--lang",
271
+ type=str,
272
+ default="eng_Latn",
273
+ help="Language+script code (default: eng_Latn)",
274
+ )
275
+
276
+ parser.add_argument(
277
+ "--all-languages",
278
  action="store_true",
279
+ help="Analyze all languages (70+) instead of single language",
280
  )
281
 
282
  parser.add_argument(
283
+ "--show-plan",
284
+ action="store_true",
285
+ help="Show Polars query plan (demonstrates optimization)",
 
286
  )
287
 
288
  parser.add_argument(
289
  "--list-languages",
290
  action="store_true",
291
+ help="List available languages and exit",
292
  )
293
 
294
  parser.add_argument(
 
307
  "--output-dir",
308
  type=str,
309
  default="./stats_output",
310
+ help="Local directory for output files",
311
  )
312
 
313
  parser.add_argument(
 
345
  sys.exit(0)
346
 
347
  # Build the parquet path
348
+ if args.all_languages:
349
+ source_path = f"hf://datasets/{args.source_dataset}/data/*/train/*.parquet"
350
+ scope_desc = "all languages"
351
+ else:
352
+ source_path = (
353
+ f"hf://datasets/{args.source_dataset}/data/{args.lang}/train/*.parquet"
354
+ )
355
+ scope_desc = f"{args.lang} ({COMMON_LANGUAGES.get(args.lang, 'unknown')})"
356
+
357
  logger.info(f"Scanning: {source_path}")
358
+ logger.info(f"Scope: {scope_desc}")
359
 
360
  # Create lazy frame - this doesn't load any data yet!
361
  logger.info("Creating lazy query plan...")
 
385
  output_dir = Path(args.output_dir)
386
  output_dir.mkdir(parents=True, exist_ok=True)
387
 
388
+ # Single scan: compute temporal stats
389
+ logger.info("Computing temporal stats (single scan)...")
 
 
 
390
  start = time.perf_counter()
391
+ temporal_path = output_dir / "temporal_stats.parquet"
392
+ temporal_raw = compute_temporal_stats(df, temporal_path)
393
+ scan_time = time.perf_counter() - start
394
+ logger.info(f"Scan complete in {scan_time:.2f}s - {len(temporal_raw)} dumps")
395
+
396
+ # Compute stats
397
+ global_stats = compute_global_stats(temporal_raw)
398
+ temporal_stats = format_temporal_stats(temporal_raw)
399
+
400
+ # Save
401
  global_stats.write_parquet(output_dir / "global_stats.parquet")
402
+ temporal_stats.write_parquet(output_dir / "temporal_stats.parquet")
403
 
404
+ # Print results
405
+ total_docs = global_stats["total_docs"][0]
406
+ docs_per_sec = total_docs / scan_time if scan_time > 0 else 0
 
 
 
 
 
 
 
 
407
 
408
+ print("\n" + "=" * 70)
409
+ print("IS THE WEB GETTING MORE EDUCATIONAL?")
410
+ print("=" * 70)
 
 
 
 
 
 
 
411
 
412
+ print(f"\nScope: {scope_desc}")
413
+ print(f"Dataset: {args.source_dataset}")
414
+
415
+ print("\n" + "-" * 70)
416
+ print("GLOBAL STATS")
417
+ print("-" * 70)
418
+ print(global_stats)
419
+
420
+ print("\n" + "-" * 70)
421
+ print(f"TEMPORAL TREND ({len(temporal_stats)} CommonCrawl dumps)")
422
+ print("-" * 70)
423
+ # Show first 5 and last 5
424
+ if len(temporal_stats) > 10:
425
+ print("Earliest dumps:")
426
+ print(temporal_stats.head(5))
427
+ print("\n...")
428
+ print("\nLatest dumps:")
429
+ print(temporal_stats.tail(5))
430
+ else:
431
+ print(temporal_stats)
432
+
433
+ # Create ASCII charts
434
+ ascii_charts = create_ascii_charts(temporal_stats)
435
+ print("\n" + "-" * 70)
436
+ print("TREND VISUALIZATION")
437
+ print("-" * 70)
438
+ print(ascii_charts)
439
+
440
+ print("\n" + "-" * 70)
441
+ print("PERFORMANCE")
442
+ print("-" * 70)
443
+ print(f"Scan time: {scan_time:.2f}s")
444
+ print(f"Documents: {total_docs:,}")
445
+ print(f"Throughput: {docs_per_sec:,.0f} docs/sec")
446
 
447
  logger.info(f"Results saved to: {output_dir}")
448
 
 
469
  else:
470
  raise
471
 
472
+ # Upload each as a dataset config
473
+ configs = [
474
+ ("global_stats", global_stats),
475
+ ("temporal_stats", temporal_stats),
476
+ ]
 
 
477
 
478
+ for config_name, stats_df in configs:
479
  logger.info(f"Uploading {config_name}...")
480
+ ds = Dataset.from_polars(stats_df)
481
  ds.push_to_hub(
482
  args.output_repo,
483
  config_name=config_name,
484
  token=hf_token,
485
  private=args.private,
486
  )
487
+ time.sleep(1) # Avoid 409 conflicts
488
 
489
+ # Upload README
490
+ readme_content = create_readme(
491
+ args, global_stats, temporal_stats, scan_time, ascii_charts
492
+ )
493
  api.upload_file(
494
  path_or_fileobj=readme_content.encode(),
495
  path_in_repo="README.md",
 
505
 
506
  if __name__ == "__main__":
507
  if len(sys.argv) == 1:
508
+ print("Is the Web Getting More Educational?")
509
+ print("=" * 40)
510
+ print("\nAnalyze educational quality trends across CommonCrawl dumps")
511
+ print("using Polars streaming - no download needed!\n")
512
  print("Example commands:\n")
513
+ print("# Quick test:")
 
 
514
  print("uv run finepdfs-stats.py --limit 10000\n")
515
+ print("# Analyze English PDFs:")
516
+ print("uv run finepdfs-stats.py\n")
517
+ print("# Analyze ALL 70+ languages:")
518
+ print("uv run finepdfs-stats.py --all-languages\n")
519
  print("# Show query plan (see Polars optimization):")
520
  print("uv run finepdfs-stats.py --show-plan --limit 1000\n")
 
 
 
 
521
  print("# Save results to HF Hub:")
522
+ print("uv run finepdfs-stats.py --output-repo username/temporal-stats\n")
523
+ print("# Run on HF Jobs:")
524
+ print("hf jobs uv run \\")
525
  print(" -s HF_TOKEN \\")
526
  print(" -e HF_XET_HIGH_PERFORMANCE=1 \\")
527
+ print(
528
+ " https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/finepdfs-stats.py \\"
529
+ )
530
  print(" -- --output-repo username/stats")
531
  sys.exit(0)
532