Tasks: Feature Extraction
Languages: English
Size: 1K<n<10K
Tags: representation-similarity, representation-convergence, cross-model-transport, benchmark, alignment, evaluation
License: apache-2.0
Croissant metadata:

```json
{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "dct": "http://purl.org/dc/terms/",
    "examples": {
      "@id": "cr:examples",
      "@type": "@json"
    },
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "sc": "https://schema.org/",
    "separator": "cr:separator",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform"
  },
  "@type": "sc:Dataset",
  "name": "BCCT-Hub",
  "description": "BCCT-Hub: a benchmark and toolkit for measuring representation convergence across model families. The release contains four pre-computed pairwise compatibility atlases (vision: 190 pairs across 20 encoders; language: 36 pairs across 9 LLMs; audio: 15 pairs; video: 15 pairs), 41 pre-extracted feature tensors, statistical-analysis JSON outputs, an 88-paper meta-analysis CSV, and a per-claim artifact manifest mapping every paper claim to its producing script. The release accompanies a NeurIPS 2026 Evaluations & Datasets Track submission and is intended for representation-similarity benchmarking research.",
  "url": "https://anonymous.4open.science/r/bcct-hub",
  "version": "0.1.0",
  "license": "https://www.apache.org/licenses/LICENSE-2.0",
  "citeAs": "Anonymous Authors. BCCT-Hub: A Benchmark and Toolkit for Measuring Representation Convergence Across Model Families. NeurIPS 2026 Evaluations & Datasets Track (under review).",
  "datePublished": "2026-05-06",
  "conformsTo": "http://mlcommons.org/croissant/1.0",
  "keywords": [
    "representation-similarity",
    "convergence",
    "transport",
    "benchmark",
    "scorecard",
    "alignment",
    "evaluation"
  ],
  "rai:dataCollection": "Pairwise BCCT metrics (effective-rank bitrate, null-calibrated mutual k-NN, CKA, Procrustes-based transport linearity, transport asymmetry, bottleneck mismatch) were computed on features extracted from publicly available pretrained encoders (CLIP, DINOv2, ResNet, ViT, ConvNeXt, MAE, BEiT, MLP-Mixer, Swin, EfficientNet, Pythia, OPT, Phi-2, GPT-2, LLaMA-style models, and others). Source data are CIFAR-100 (vision test, 5000 images), STL-10 (vision robustness check; pseudo-clip rendering for video atlas), WikiText-103 (language, 2000 passages of 128 tokens), and LibriSpeech test-clean (audio). All extraction is deterministic with seed 42.",
  "rai:dataCollectionType": ["Derived from public pretrained models and standard benchmarks"],
  "rai:dataCollectionTimeFrame": {
    "@type": "sc:DateTime",
    "@value": "2026-01-01/2026-06-30"
  },
  "rai:dataCollectionTimeFrameDescription": "Feature extraction and atlas computation were performed in 2026 (Q1-Q2). Source pretrained-model snapshots and benchmark releases are documented per row in the released JSON.",
  "rai:dataAnnotationProtocol": "No human annotation. All numerical values are deterministic outputs of standardized scripts (scripts/extract_features*.py, scripts/compute_alignment_atlas.py, scripts/compute_audio_atlas.py, scripts/compute_video_atlas.py, scripts/llm_experiment.py).",
  "rai:dataAnnotationPlatform": "Not applicable (no human annotation).",
  "rai:dataAnnotationAnalysis": "Not applicable.",
  "rai:dataAnnotatorDemographics": "Not applicable.",
  "rai:dataPreprocessingProtocol": "Vision images: resize 256, center-crop 224, ImageNet mean/std normalization. STL-10: 96 to 224 upscale. LibriSpeech: 16 kHz mono, model-specific feature extractors. WikiText-103: 128-token windows, attention-mask-aware mean pooling. All pipelines fixed in scripts/ with seed=42.",
  "rai:dataLimitations": "Audio uses a single clean English corpus (LibriSpeech test-clean) and is preliminary at n=15 pairs. Video uses STL-10 pseudo-clips (no genuine temporal structure) and is preliminary at n=15 pairs. The largest LLMs are 7B parameters (Mistral-7B, Falcon-7B, Pythia-6.9B); whether the regime distribution shifts at frontier scale (70B+) is untested. All LLMs in the atlas are base/pretrained-only, no instruction-tuned or RLHF models. The vision atlas is monolingual-equivalent (image-only, no text). Bitrate is reported as an effective-rank covariance proxy, not a calibrated mutual-information estimate. Transport linearity is defined relative to a standardized 2-layer MLP probe and is not an oracle nonlinear comparison. Pre-computed compatibility predictions assume the atlas's feature-extraction protocol; cross-protocol pairs are systematically flagged Divergent (see Appendix G of the accompanying paper).",
  "rai:dataBiases": "The 88-paper meta-analysis (data/meta_analysis.csv) is biased toward vision (51/88) over language (16) and audio (3), reflecting publication frequency in representational similarity research up to 2026. The vision atlas is biased toward English-language web/image-corpus pretraining (CLIP, DINOv2 trained on LVD-142M; ImageNet-trained supervised baselines). The LLM atlas is English-only. Family selection is biased toward openly available checkpoints; closed-source models (GPT-4 class) are excluded by necessity, and recent permissively licensed code-generation or multilingual models may be under-represented.",
  "rai:personalSensitiveInformation": "The release contains no personal or sensitive information. All four atlases are statistical summaries (similarity scores, regime labels) over public benchmark stimuli (CIFAR-100, STL-10, WikiText-103, LibriSpeech test-clean), and the released feature tensors are deterministic activations of public pretrained models on these public benchmarks. CIFAR-100 and STL-10 contain no personal data. LibriSpeech test-clean contains read public-domain audiobook recordings with speaker IDs released by the LibriVox/LibriSpeech project; we use only model activations, not raw audio. WikiText-103 is derived from Wikipedia featured/good articles. No demographic, health, financial, political, or religious information is collected, inferred, or released.",
  "rai:hasSyntheticData": false,
  "rai:syntheticDataDescription": "No synthetic data is used or released. All numerical artifacts are deterministic outputs of public pretrained models evaluated on public real-world benchmarks (CIFAR-100, STL-10, WikiText-103, LibriSpeech test-clean).",
  "rai:dataUseCases": "Intended uses (validated in the accompanying paper): (a) compatibility screening between candidate model pairs prior to model-stitching or knowledge-transfer experiments; (b) reproducing and auditing the headline statistical findings of the paper (mixed-effects family beta=0.20, block-bootstrap rho=-0.70, retrieval rho=0.76); (c) extending the atlas with new encoders by re-running the extraction pipeline; (d) regression testing for representation-similarity research that builds on CKA, mutual k-NN, Procrustes-based transport, or effective-rank proxies. Out-of-scope uses (NOT validated and explicitly not recommended): (i) deployment-time decisions about whether two production models are interchangeable in a downstream application; (ii) safety or fairness certification of any individual encoder; (iii) inference about model training data, intellectual property, or copyright provenance from feature-space similarity.",
  "rai:dataSocialImpact": "Positive impact: standardized evaluation protocols reduce wasted compute on incompatible model pairs, support reproducible representational-similarity research, and surface honest negative findings (e.g., the 35/190 disagreement cases in which CKA looks high but linear transport fails). Negative impact / misuse risk: BCCT scores are diagnostic summaries for comparative research, not deployment guarantees; we explicitly state that BCCT scores must not be used as certifications of safe model interchangeability. Misreading the scorecard as a safety stamp could be harmful when applied to production-critical interchangeability decisions. We mitigate this risk by making the scope statement prominent in the abstract, the conclusion, and the Reviewer Checklist, and by releasing the full per-claim manifest so any user can trace any reported number back to its producing script.",
  "rai:dataReleaseMaintenancePlan": "Versioned release at the anonymous URL during review; planned migration to a persistent platform (Hugging Face Datasets and/or OpenML) at camera-ready, with a DOI for the 0.1.0 snapshot. Issue tracker on the public repository will accept extension requests (new encoders) and bug reports (numerical inconsistencies). Version policy: minor releases for added encoders; major releases for atlas-recomputation under different feature-extraction protocols.",
  "isLiveDataset": false,
  "creator": {
    "@type": "Organization",
    "name": "Anonymous"
  },
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "bcct-hub-root",
      "name": "bcct-hub-archive",
      "description": "Root archive of the BCCT-Hub release; contains all feature tensors, experiment JSONs, atlas JSONs, scripts, and the paper. The release URL serves an anonymous source archive during NeurIPS 2026 review and a versioned snapshot post camera-ready.",
      "contentUrl": "https://anonymous.4open.science/r/bcct-hub",
      "encodingFormat": "application/zip",
      "sha256": "PENDING_FINAL_REPOSITORY_SNAPSHOT_HASH"
    },
    {
      "@type": "cr:FileObject",
      "@id": "atlas-vision",
      "name": "atlas.json",
      "description": "Vision compatibility atlas: 190 pairs across 20 pretrained encoders on CIFAR-100 test (5000 images). Each pair has six BCCT metrics and a regime label.",
      "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/atlas.json",
      "encodingFormat": "application/json",
      "sha256": "c8826acc8352017e5f3e0c599666f71ee367fdf1677fa9cd4d6c6b64aa9a6a77"
    },
    {
      "@type": "cr:FileObject",
      "@id": "atlas-llm",
      "name": "llm_atlas.json",
      "description": "Language compatibility atlas: 36 pairs across 9 base LLMs on WikiText-103 (2000 passages of 128 tokens).",
      "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/llm_atlas.json",
      "encodingFormat": "application/json",
      "sha256": "56543b9b58b2855a64749aa9a158cdcb9a06fcf7f1bfc662ebe7afb1026b28dd"
    },
    {
      "@type": "cr:FileObject",
      "@id": "atlas-audio",
      "name": "audio_atlas.json",
      "description": "Preliminary audio compatibility atlas: 15 pairs across 6 audio encoders on LibriSpeech test-clean.",
      "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/audio_atlas.json",
      "encodingFormat": "application/json",
      "sha256": "f6c09f96fe301fc8156421a18d10534ea704cea0c598aac87bb9f4e0142c20a5"
    },
    {
      "@type": "cr:FileObject",
      "@id": "atlas-video",
      "name": "video_atlas.json",
      "description": "Exploratory video compatibility atlas: 15 pairs across 6 video encoders on STL-10 pseudo-clips.",
      "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/video_atlas.json",
      "encodingFormat": "application/json",
      "sha256": "189e11c2a70eb7ec96a3dd112d86b9ffa0651745c8d6ddfcec67a041f4b6e752"
    },
    {
      "@type": "cr:FileObject",
      "@id": "meta-analysis-csv",
      "name": "meta_analysis.csv",
      "description": "88-paper meta-analysis extraction (cite key, year, thread, domain, scale tier, metric type, reported value, inferred BCCT regime, key finding).",
      "contentUrl": "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/meta_analysis.csv",
      "encodingFormat": "text/csv",
      "sha256": "d9e1c0d0eb70ec4d9d4036465643696a0b03222ae0783084c82fda40ed43b0d8"
    },
    {
      "@type": "cr:FileSet",
      "@id": "experiment-results",
      "name": "experiments",
      "description": "Statistical and downstream-utility analysis JSON outputs (mixed-effects, holdout validation, retrieval, stitching, external scorecard study, threshold sensitivity, probe variance, knn sensitivity, metric ablation, regime clustering, etc.).",
      "containedIn": {"@id": "bcct-hub-root"},
      "encodingFormat": "application/json",
      "includes": "data/experiments/*.json"
    },
    {
      "@type": "cr:FileSet",
      "@id": "vision-features",
      "name": "vision-features",
      "description": "Pre-extracted feature tensors for 20 vision encoders on CIFAR-100 test (5000 images).",
      "containedIn": {"@id": "bcct-hub-root"},
      "encodingFormat": "application/x-pytorch",
      "includes": "data/features/*.pt"
    },
    {
      "@type": "cr:FileSet",
      "@id": "vision-features-train",
      "name": "vision-features-train",
      "description": "Pre-extracted feature tensors for 20 vision encoders on a CIFAR-100 train subset (seed=42; 5000 images).",
      "containedIn": {"@id": "bcct-hub-root"},
      "encodingFormat": "application/x-pytorch",
      "includes": "data/features_train/*.pt"
    },
    {
      "@type": "cr:FileSet",
      "@id": "external-features",
      "name": "external-features",
      "description": "Pre-extracted feature tensors for 5 out-of-atlas vision encoders used in the external scorecard case study (Appendix G).",
      "containedIn": {"@id": "bcct-hub-root"},
      "encodingFormat": "application/x-pytorch",
      "includes": "data/features_external/**/*.pt"
    }
  ],
  "recordSet": [
    {
      "@type": "cr:RecordSet",
      "@id": "meta-analysis-records",
      "name": "meta-analysis",
      "description": "88-paper extraction with thread, domain, scale, metric type, BCCT regime inference, and one-line key finding.",
      "field": [
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/cite_key",
          "name": "cite_key",
          "dataType": "sc:Text",
          "source": {
            "fileObject": {"@id": "meta-analysis-csv"},
            "extract": {"column": "cite_key"}
          }
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/short_name",
          "name": "short_name",
          "dataType": "sc:Text"
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/year",
          "name": "year",
          "dataType": "sc:Integer"
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/thread",
          "name": "thread",
          "description": "One of {convergence, transport, latent_design, theory}.",
          "dataType": "sc:Text"
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/domain",
          "name": "domain",
          "dataType": "sc:Text"
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/metric_type",
          "name": "metric_type",
          "dataType": "sc:Text"
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/bcct_regime",
          "name": "bcct_regime",
          "description": "Inferred BCCT regime; 'N/A' when reported evidence is insufficient.",
          "dataType": "sc:Text"
        },
        {
          "@type": "cr:Field",
          "@id": "meta-analysis-records/key_finding",
          "name": "key_finding",
          "dataType": "sc:Text"
        }
      ]
    }
  ]
}
```
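Each standalone file in the `distribution` array above publishes a SHA-256 hash. A minimal usage sketch, assuming only what the card states (the `contentUrl` and hash for `atlas.json`): download the vision atlas, verify its checksum, and parse it. The internal per-pair schema is not documented in this card, so nothing beyond parsing is shown.

```python
# Minimal sketch: fetch the vision atlas, check its published SHA-256, and load it.
# The URL and hash come from the Croissant `distribution` entries above; the
# internal structure of the parsed JSON is undocumented here and left opaque.
import hashlib
import json
import urllib.request

ATLAS_URL = "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/atlas.json"
ATLAS_SHA256 = "c8826acc8352017e5f3e0c599666f71ee367fdf1677fa9cd4d6c6b64aa9a6a77"

raw = urllib.request.urlopen(ATLAS_URL).read()
assert hashlib.sha256(raw).hexdigest() == ATLAS_SHA256, "checksum mismatch"

atlas = json.loads(raw)  # 190 pairs across 20 encoders, per the card
```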
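The `rai:dataPreprocessingProtocol` field pins the vision pipeline: resize to 256, center-crop to 224, ImageNet mean/std normalization. A sketch of that pipeline in torchvision terms; the repository's `scripts/extract_features*.py` are authoritative and may differ in detail.

```python
# The stated vision preprocessing, expressed with torchvision. The ImageNet
# mean/std constants are the standard values; the exact Compose used by
# scripts/extract_features*.py is an assumption.
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
```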
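Of the six pairwise metrics named in `rai:dataCollection`, CKA has a well-known closed form. Below is the textbook linear-CKA estimator (Kornblith et al., 2019) applied to two released feature tensors. The `.pt` file names and the `[N, D]` tensor layout are assumptions, and the atlas values themselves come from `scripts/compute_alignment_atlas.py`, which may use a different estimator.

```python
# Standard (biased) linear CKA between feature matrices X [N, D1] and Y [N, D2]:
#   CKA(X, Y) = ||Y^T X||_F^2 / (||X^T X||_F * ||Y^T Y||_F), with X, Y centered.
# This is the textbook formulation, not necessarily the repository's estimator.
import torch

def linear_cka(x: torch.Tensor, y: torch.Tensor) -> float:
    x = x.double() - x.double().mean(dim=0, keepdim=True)  # center columns
    y = y.double() - y.double().mean(dim=0, keepdim=True)
    num = ((y.T @ x) ** 2).sum()                            # ||Y^T X||_F^2
    den = torch.linalg.norm(x.T @ x) * torch.linalg.norm(y.T @ y)
    return (num / den).item()

# Hypothetical file names under data/features/ -- the actual names are not
# documented in this card.
feats_a = torch.load("data/features/clip_vit_b32.pt")
feats_b = torch.load("data/features/dinov2_base.pt")
print(f"linear CKA = {linear_cka(feats_a, feats_b):.3f}")
```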
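The mutual k-NN metric admits a similarly compact sketch: average the overlap between each item's k-nearest-neighbor sets in the two feature spaces. The cosine-similarity neighborhood and k=10 below are assumptions, and the null-calibration step the card describes is not reproduced here.

```python
# One common mutual k-NN formulation: per-item overlap of k-NN sets computed
# independently in each feature space, averaged over items. Cosine similarity
# and k=10 are assumptions; the card's *null calibration* is omitted.
import torch

def mutual_knn(x: torch.Tensor, y: torch.Tensor, k: int = 10) -> float:
    def knn_indices(z: torch.Tensor) -> torch.Tensor:
        z = torch.nn.functional.normalize(z, dim=1)
        sim = z @ z.T                        # [N, N] cosine similarities
        sim.fill_diagonal_(-float("inf"))    # exclude self-matches
        return sim.topk(k, dim=1).indices    # [N, k] neighbor indices

    nx, ny = knn_indices(x), knn_indices(y)
    overlap = torch.tensor(
        [len(set(a.tolist()) & set(b.tolist())) for a, b in zip(nx, ny)],
        dtype=torch.float,
    )
    return (overlap / k).mean().item()
```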
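The `meta-analysis-records` record set can also be consumed directly as a CSV. A sketch tabulating inferred BCCT regime by domain, assuming the CSV header matches the Croissant field names above (`cite_key`, `year`, `thread`, `domain`, `metric_type`, `bcct_regime`, `key_finding`); per the card, 'N/A' marks rows where reported evidence was insufficient.

```python
# Load the 88-paper meta-analysis and cross-tabulate regimes by domain.
# Column names are assumed to match the Croissant field names; the published
# description also mentions scale-tier and reported-value columns not shown here.
import pandas as pd

URL = "https://huggingface.co/datasets/EvalData/BCCT-Hub/resolve/main/data/meta_analysis.csv"
meta = pd.read_csv(URL)
print(meta.groupby(["domain", "bcct_regime"]).size().unstack(fill_value=0))
```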