#!/usr/bin/env python3
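"""Validate a CB-Telemetry release package.

Checks the required file layout, package hygiene, recording_id alignment
across manifests and feature tables, the reviewer audio sample, and the
croissant.json metadata, then prints a JSON report and exits 0 on pass or
1 on failure.

Typical invocation (the root path is illustrative):

    python scripts/validate_cb_telemetry.py --root ./cb-telemetry --write-report
"""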
from __future__ import annotations
import argparse
import csv
import gzip
import hashlib
import json
import re
from pathlib import Path
from typing import Iterable
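# File suffixes treated as text when scanning for leaked paths and draft markers.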
TEXT_SUFFIXES = {
".csv",
".json",
".md",
".txt",
".cff",
".py",
".sh",
".yaml",
".yml",
}
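# Fragments of absolute home-directory paths that would indicate a
# contributor's local filesystem leaked into release files.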
LOCAL_PATH_PATTERNS = [
re.compile(r"/" + r"Users/"),
re.compile(r"/home/"),
re.compile(r"\\Users\\"),
]
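# Draft markers; the pattern strings are built by concatenation, presumably so
# this script's own source never contains the literal markers it flags.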
DRAFT_TEXT_PATTERNS = [
re.compile(r"\b" + "T" + "BD" + r"\b", re.IGNORECASE),
re.compile(r"\b" + "TO" + "DO" + r"\b", re.IGNORECASE),
re.compile("place" + "holder", re.IGNORECASE),
]
JUNK_FILE_NAMES = {".DS_Store", "Thumbs.db"}
JUNK_DIR_NAMES = {"__MACOSX"}
AUDIO_SUFFIXES = {".aac", ".flac", ".m4a", ".mp3", ".ogg", ".wav"}
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Validate a CB-Telemetry release package.")
parser.add_argument("--root", required=True, help="CB-Telemetry release root.")
parser.add_argument("--write-report", action="store_true", help="Write qa/validation_report.{json,md}.")
return parser.parse_args()
def read_csv_rows(path: Path) -> list[dict[str, str]]:
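    """Read a CSV file (plain or gzip-compressed, by suffix) into a list of row dicts."""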
if path.suffix == ".gz":
handle = gzip.open(path, "rt", encoding="utf-8", newline="")
else:
handle = path.open("r", encoding="utf-8", newline="")
with handle:
return list(csv.DictReader(handle))
def sha256(path: Path) -> str:
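    """Return the hex SHA-256 digest of a file, read in 1 MiB chunks."""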
digest = hashlib.sha256()
with path.open("rb") as handle:
for chunk in iter(lambda: handle.read(1024 * 1024), b""):
digest.update(chunk)
return digest.hexdigest()
def add_issue(bucket: list[dict[str, str]], check: str, message: str) -> None:
bucket.append({"check": check, "message": message})
def require_files(root: Path, rel_paths: Iterable[str], errors: list[dict[str, str]]) -> None:
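    """Record an error for every expected release path that does not exist under root."""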
for rel_path in rel_paths:
if not (root / rel_path).exists():
add_issue(errors, "required_file", f"Missing required file: {rel_path}")
def validate_package_hygiene(
root: Path,
errors: list[dict[str, str]],
warnings: list[dict[str, str]],
) -> None:
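    """Flag OS metadata artifacts: junk files are warnings, junk directories are errors."""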
for path in root.rglob("*"):
rel = path.relative_to(root).as_posix()
if path.name in JUNK_FILE_NAMES:
add_issue(warnings, "package_hygiene", f"Local release directory contains OS metadata file: {rel}")
if path.is_dir() and path.name in JUNK_DIR_NAMES:
add_issue(errors, "package_hygiene", f"Release package contains OS metadata directory: {rel}")
def scan_text(root: Path, errors: list[dict[str, str]], warnings: list[dict[str, str]]) -> None:
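    """Scan text-like files for leaked local filesystem paths (errors) and draft markers (warnings)."""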
for path in root.rglob("*"):
if not path.is_file():
continue
        rel = path.relative_to(root).as_posix()
        # Skip this validator and its own report output so their embedded
        # patterns do not trigger false positives.
        if rel in {
            "scripts/validate_cb_telemetry.py",
            "qa/validation_report.json",
            "qa/validation_report.md",
        }:
            continue
        is_gzip_csv = path.suffixes[-2:] == [".csv", ".gz"]
        if path.suffix not in TEXT_SUFFIXES and not is_gzip_csv:
            continue
        try:
            if is_gzip_csv:
                # Close the gzip handle promptly instead of leaking it.
                with gzip.open(path, "rt", encoding="utf-8", errors="replace") as handle:
                    text = handle.read()
            else:
                text = path.read_text(encoding="utf-8", errors="replace")
        except (OSError, UnicodeDecodeError):
            continue
for pattern in LOCAL_PATH_PATTERNS:
if pattern.search(text):
add_issue(errors, "local_path_leak", f"Local path pattern found in {rel}: {pattern.pattern}")
for pattern in DRAFT_TEXT_PATTERNS:
if pattern.search(text):
add_issue(warnings, "draft_text", f"Draft-like marker found in {rel}: {pattern.pattern}")
def validate_ids(root: Path, errors: list[dict[str, str]], warnings: list[dict[str, str]]) -> dict[str, int]:
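    """Cross-check recording_id sets across manifests and feature tables; return row-count stats."""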
stats: dict[str, int] = {}
scored_path = root / "manifests" / "scored_snapshot_manifest.csv"
splits_path = root / "manifests" / "splits.csv"
default_features_path = root / "features" / "feature_table_default.csv.gz"
strict_features_path = root / "features" / "feature_table_strict_clean.csv.gz"
scored = read_csv_rows(scored_path)
splits = read_csv_rows(splits_path)
default_features = read_csv_rows(default_features_path)
strict_features = read_csv_rows(strict_features_path)
scored_ids = [row.get("recording_id", "") for row in scored]
split_ids = [row.get("recording_id", "") for row in splits]
default_ids = [row.get("recording_id", "") for row in default_features]
strict_ids = [row.get("recording_id", "") for row in strict_features]
stats["scored_rows"] = len(scored)
stats["split_rows"] = len(splits)
stats["default_feature_rows"] = len(default_features)
stats["strict_feature_rows"] = len(strict_features)
if len(set(scored_ids)) != len(scored_ids):
add_issue(errors, "recording_id_unique", "scored_snapshot_manifest.csv contains duplicate recording_id values.")
if set(scored_ids) != set(split_ids):
add_issue(errors, "split_alignment", "splits.csv recording_id set does not match scored snapshot.")
if set(scored_ids) != set(default_ids):
add_issue(errors, "default_feature_alignment", "default feature recording_id set does not match scored snapshot.")
    strict_split_ids = {row.get("recording_id", "") for row in splits if row.get("subset_strict_clean") == "1"}
if strict_split_ids != set(strict_ids):
add_issue(errors, "strict_feature_alignment", "strict feature recording_id set does not match strict-clean split.")
    years = {row.get("year", "") for row in scored}
    if "2011" in years:
        add_issue(errors, "scored_year_boundary", "2011 rows are present in the scored snapshot.")
    if not years:
        add_issue(errors, "scored_year_boundary", "Scored snapshot has no year values.")
    else:
        # Years are zero-padded 4-digit strings, so lexicographic comparison
        # matches numeric ordering.
        if min(years) < "2012":
            add_issue(errors, "scored_year_boundary", f"Unexpected scored year range: {sorted(years)}")
        if max(years) > "2024":
            add_issue(errors, "scored_year_boundary", f"Scored rows exceed J-STAGE 2012-2024 range: {sorted(years)}")
matched = sum(1 for row in scored if int(float(row.get("jstage_event_count") or 0)) > 0)
stats["jstage_matched_recordings"] = matched
if matched == 0:
add_issue(errors, "jstage_alignment", "No scored recordings have J-STAGE expert-overlap counts.")
elif matched < len(scored):
add_issue(
warnings,
"jstage_alignment",
f"{len(scored) - matched} scored recordings have no J-STAGE observation-minute overlap.",
)
expert_counts = [
int(float(row.get("expert_overlap_event_count") or 0))
for row in scored
if "expert_overlap_event_count" in row
]
if expert_counts and any(count <= 0 for count in expert_counts):
add_issue(errors, "expert_overlap", "Scored snapshot contains non-positive expert_overlap_event_count values.")
return stats
def validate_audio_samples(root: Path, errors: list[dict[str, str]], warnings: list[dict[str, str]]) -> dict[str, int]:
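    """Validate the reviewer audio sample: schema, clip windows, checksums, and audio_sample/metadata.csv alignment."""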
sample_path = root / "manifests" / "audio_sample_manifest.csv"
hf_metadata_path = root / "audio_sample" / "metadata.csv"
rows = read_csv_rows(sample_path)
hf_metadata_rows = read_csv_rows(hf_metadata_path) if hf_metadata_path.exists() else []
stats = {"audio_sample_rows": len(rows)}
if not rows:
add_issue(errors, "audio_sample", "audio_sample_manifest.csv is empty.")
return stats
sites = {row.get("site_en", "") for row in rows}
behaviors: set[str] = set()
referenced_paths: set[str] = set()
expected_hf_files: set[str] = set()
required_clip_columns = [
"audio_sample_format",
"clip_start_sec",
"clip_duration_sec",
"segment_start_sec",
"segment_end_sec",
"segment_duration_sec",
"clip_source_audio_path",
"clip_generation_tool",
]
for row in rows:
behaviors.update(item for item in row.get("behavior_types", "").split("|") if item)
audio_rel = row.get("audio_sample_path", "")
if not audio_rel:
add_issue(errors, "audio_sample_path", "Audio sample row has empty audio_sample_path.")
continue
referenced_paths.add(audio_rel)
try:
expected_hf_files.add(Path(audio_rel).relative_to("audio_sample").as_posix())
except ValueError:
add_issue(errors, "audio_sample_path", f"Audio sample path should be under audio_sample/: {audio_rel}")
for column in required_clip_columns:
if column not in row or row.get(column, "") == "":
add_issue(errors, "audio_sample_schema", f"Audio sample row missing {column}: {audio_rel}")
if row.get("audio_sample_format") != "m4a":
add_issue(errors, "audio_sample_format", f"Reviewer audio clip should be m4a: {audio_rel}")
try:
clip_start = float(row.get("clip_start_sec") or "nan")
clip_duration = float(row.get("clip_duration_sec") or "nan")
segment_start = float(row.get("segment_start_sec") or "nan")
segment_end = float(row.get("segment_end_sec") or "nan")
except ValueError:
add_issue(errors, "audio_sample_window", f"Invalid clip or segment window values: {audio_rel}")
continue
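        # Empty fields parse to NaN via the "nan" fallback; NaN comparisons below
        # are all False, but the required-column check above already flags those rows.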
if clip_start < 0 or clip_duration <= 0:
add_issue(errors, "audio_sample_window", f"Invalid non-positive clip window: {audio_rel}")
if clip_duration > 30:
add_issue(errors, "audio_sample_window", f"Reviewer clip exceeds 30 seconds: {audio_rel}")
if segment_end < segment_start:
add_issue(errors, "audio_sample_window", f"Segment end precedes start: {audio_rel}")
audio_path = root / audio_rel
if not audio_path.exists():
add_issue(errors, "audio_sample_exists", f"Missing audio sample file: {audio_rel}")
continue
expected = row.get("sha256", "")
if expected and sha256(audio_path) != expected:
add_issue(errors, "audio_sample_sha256", f"SHA256 mismatch for {audio_rel}")
        if audio_path.stat().st_size > 10 * 1024 * 1024:
            add_issue(warnings, "audio_sample_size", f"Reviewer audio clip is larger than 10 MB: {audio_rel}")
audio_sample_dir = root / "audio_sample"
if audio_sample_dir.exists():
for path in audio_sample_dir.rglob("*"):
if not path.is_file():
continue
if path.suffix.lower() not in AUDIO_SUFFIXES:
continue
rel = path.relative_to(root).as_posix()
if rel not in referenced_paths:
add_issue(errors, "audio_sample_orphan", f"Unreferenced audio sample file: {rel}")
if not hf_metadata_rows:
add_issue(errors, "hf_audio_metadata", "audio_sample/metadata.csv is missing or empty.")
else:
hf_files = {row.get("file_name", "") for row in hf_metadata_rows}
if hf_files != expected_hf_files:
add_issue(errors, "hf_audio_metadata", "audio_sample/metadata.csv file_name set does not match audio samples.")
stats["audio_sample_sites"] = len(sites)
stats["audio_sample_behaviors"] = len(behaviors)
if len(sites) < 3:
add_issue(warnings, "audio_sample_coverage", f"Audio sample covers only {len(sites)} sites.")
if not {"S", "C", "D"}.issubset(behaviors):
add_issue(warnings, "audio_sample_coverage", f"Audio sample behavior coverage is {sorted(behaviors)}.")
return stats
def validate_croissant(root: Path, errors: list[dict[str, str]]) -> None:
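    """Check croissant.json for required top-level, RAI/provenance, distribution, and recordSet keys."""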
path = root / "croissant.json"
try:
data = json.loads(path.read_text(encoding="utf-8"))
except json.JSONDecodeError as exc:
add_issue(errors, "croissant_json", f"croissant.json is invalid JSON: {exc}")
return
required = [
"@context",
"@type",
"name",
"description",
"url",
"license",
"conformsTo",
"distribution",
"recordSet",
]
for key in required:
if key not in data:
add_issue(errors, "croissant_required_key", f"croissant.json missing required key: {key}")
if data.get("@type") != "sc:Dataset":
add_issue(errors, "croissant_dataset_type", 'croissant.json @type should be "sc:Dataset".')
if "by-nc-sa" not in str(data.get("license", "")).lower():
add_issue(errors, "croissant_license", "croissant.json license should identify CC BY-NC-SA 4.0.")
rai_required = [
"rai:dataLimitations",
"rai:dataBiases",
"rai:personalSensitiveInformation",
"rai:dataUseCases",
"rai:dataSocialImpact",
"rai:hasSyntheticData",
"prov:wasDerivedFrom",
"prov:wasGeneratedBy",
]
for key in rai_required:
value = data.get(key)
if key not in data or value is None or value == "" or value == []:
add_issue(errors, "croissant_rai_required_key", f"croissant.json missing required RAI/provenance key: {key}")
for item in data.get("distribution", []):
if item.get("@type") not in {"cr:FileObject", "sc:FileObject", "FileObject"}:
add_issue(errors, "croissant_distribution_type", f"croissant distribution {item.get('@id', item.get('name'))} is not a FileObject.")
for key in ["@id", "contentUrl", "encodingFormat"]:
if key not in item:
add_issue(errors, "croissant_distribution_required_key", f"croissant distribution missing {key}: {item}")
for record_set in data.get("recordSet", []):
if not record_set.get("@id"):
add_issue(errors, "croissant_recordset_id", f"croissant recordSet missing @id: {record_set.get('name')}")
for field in record_set.get("field", []):
if not (field.get("source") or "value" in field):
add_issue(errors, "croissant_field_source", f"croissant field missing source/value: {field.get('name')}")
def write_report(root: Path, report: dict[str, object]) -> None:
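    """Write the validation report as qa/validation_report.json plus a Markdown summary."""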
qa_dir = root / "qa"
qa_dir.mkdir(parents=True, exist_ok=True)
(qa_dir / "validation_report.json").write_text(
json.dumps(report, indent=2, ensure_ascii=False) + "\n",
encoding="utf-8",
)
lines = [
"# CB-Telemetry Validation Report",
"",
f"- status: `{report['status']}`",
f"- errors: `{len(report['errors'])}`",
f"- warnings: `{len(report['warnings'])}`",
"",
"## Stats",
"",
]
for key, value in sorted(report["stats"].items()):
lines.append(f"- {key}: `{value}`")
lines += ["", "## Errors", ""]
if report["errors"]:
for issue in report["errors"]:
lines.append(f"- `{issue['check']}`: {issue['message']}")
else:
lines.append("- none")
lines += ["", "## Warnings", ""]
if report["warnings"]:
for issue in report["warnings"]:
lines.append(f"- `{issue['check']}`: {issue['message']}")
else:
lines.append("- none")
(qa_dir / "validation_report.md").write_text("\n".join(lines) + "\n", encoding="utf-8")
def main() -> None:
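    """Run all validators against --root, optionally write reports, and exit 0 (pass) or 1 (fail)."""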
args = parse_args()
root = Path(args.root).expanduser().resolve()
errors: list[dict[str, str]] = []
warnings: list[dict[str, str]] = []
stats: dict[str, int] = {}
require_files(
root,
[
"README.md",
"DATASET_CARD.md",
"LICENSE",
"LICENSE_MATRIX.md",
"CITATION.cff",
"croissant.json",
"annotations/cb_telemetry_annotations.csv.gz",
"manifests/audio_manifest.csv",
"manifests/audio_sample_manifest.csv",
"audio_sample/metadata.csv",
"manifests/scored_snapshot_manifest.csv",
"manifests/splits.csv",
"features/feature_table_default.csv.gz",
"features/feature_table_strict_clean.csv.gz",
"features/bottlenecks/default/standard_rvq_8bit_feature_table.csv.gz",
"features/bottlenecks/default/pq_8bit_feature_table.csv.gz",
"features/bottlenecks/default/opq_8bit_feature_table.csv.gz",
"features/bottlenecks/strict_clean/standard_rvq_8bit_feature_table.csv.gz",
"features/bottlenecks/strict_clean/pq_8bit_feature_table.csv.gz",
"features/bottlenecks/strict_clean/opq_8bit_feature_table.csv.gz",
"baselines/table_2_representation_structure.csv",
"baselines/table_3_retrieval_baselines_and_controls.csv",
"requirements.txt",
"scripts/download_audio_recordings.py",
"scripts/run_smoke_eval.py",
"scripts/run_retrieval_eval.py",
"scripts/run_release_evaluation.py",
],
errors,
)
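    # The deeper validators read the files checked above, so they run only
    # when the required-layout check passed.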
if not errors:
validate_package_hygiene(root, errors, warnings)
stats.update(validate_ids(root, errors, warnings))
stats.update(validate_audio_samples(root, errors, warnings))
validate_croissant(root, errors)
scan_text(root, errors, warnings)
report = {
"status": "pass" if not errors else "fail",
"root": root.name,
"stats": stats,
"errors": errors,
"warnings": warnings,
}
if args.write_report:
write_report(root, report)
print(json.dumps(report, indent=2, ensure_ascii=False))
raise SystemExit(0 if not errors else 1)
if __name__ == "__main__":
main()