#!/usr/bin/env python3
"""Generate HuggingFace dataset card and Croissant JSON-LD metadata.

Reads parquet/JSONL export files, extracts schema and statistics, and generates:
- exports/README.md (HuggingFace dataset card)
- exports/croissant.json (MLCommons Croissant 1.0 metadata)

Usage:
    python scripts/generate_dataset_card.py
"""

import json
from pathlib import Path

PROJECT_ROOT = Path(__file__).resolve().parent.parent
EXPORTS_DIR = PROJECT_ROOT / "exports"

# pyarrow is optional: without it the card is still generated, just without
# parquet row counts and schemas.
try:
    import pyarrow.parquet as pq

    HAS_PYARROW = True
except ImportError:
    HAS_PYARROW = False


def get_parquet_info(path: Path) -> dict:
    """Extract row count and column schema from a parquet file.

    Args:
        path: Location of the parquet file.

    Returns:
        ``{"rows": <int>, "columns": [{"name", "type"}, ...]}``, or the
        placeholder ``{"rows": "N/A", "columns": []}`` when pyarrow is
        unavailable or the file does not exist.
    """
    if not HAS_PYARROW or not path.exists():
        return {"rows": "N/A", "columns": []}
    pf = pq.ParquetFile(path)
    schema = pf.schema_arrow
    return {
        "rows": pf.metadata.num_rows,
        "columns": [
            {"name": field.name, "type": str(field.type)} for field in schema
        ],
    }


def count_jsonl(path: Path) -> int:
    """Count records in a JSONL file (one JSON document per line).

    Returns 0 if the file does not exist.
    """
    if not path.exists():
        return 0
    # Line count == record count for JSONL; explicit encoding avoids
    # locale-dependent decoding.
    with open(path, encoding="utf-8") as f:
        return sum(1 for _ in f)


def format_size(path: Path) -> str:
    """Return a human-readable file size (e.g. '12.3 MB'), or 'N/A' if missing."""
    if not path.exists():
        return "N/A"
    size = path.stat().st_size
    for unit in ("B", "KB", "MB", "GB"):
        if size < 1024:
            return f"{size:.1f} {unit}"
        size /= 1024
    return f"{size:.1f} TB"


def generate_readme() -> str:
    """Generate HuggingFace dataset card (README.md) content.

    NOTE(review): the original body of this function appears truncated in the
    available source — the YAML frontmatter string ended mid-value at
    "size_categories:" and the descriptive sections that followed (if any)
    were lost. This reconstruction closes the frontmatter and returns the
    joined sections; restore any missing narrative sections from the
    published card.
    """
    sections = []

    # YAML frontmatter consumed by the HuggingFace Hub.
    # "10M<n<100M" is the Hub's size-category convention at this scale —
    # TODO confirm against the published dataset card.
    sections.append("""---
license: cc-by-sa-4.0
language:
- en
tags:
- drug-target-interaction
- clinical-trials
- protein-protein-interaction
- negative-results
- benchmark
- bioinformatics
task_categories:
- text-classification
- question-answering
- text-generation
size_categories:
- 10M<n<100M
---""")

    return "\n".join(sections)


def generate_croissant() -> dict:
    """Generate MLCommons Croissant JSON-LD metadata."""
    croissant = {
        "@context": {
            "@vocab": "https://schema.org/",
            "sc": "https://schema.org/",
            "cr": "http://mlcommons.org/croissant/",
            "rai": "http://mlcommons.org/croissant/RAI/",
            # Croissant 1.0 maps conformsTo to Dublin Core terms.
            "dct": "http://purl.org/dc/terms/",
            "conformsTo": "dct:conformsTo",
        },
        "@type": "sc:Dataset",
        # Required by the Croissant 1.0 specification to declare conformance.
        "conformsTo": "http://mlcommons.org/croissant/1.0",
        "name": "NegBioDB",
        "description": (
            "A large-scale database of experimentally confirmed negative results "
            "across three biomedical domains (DTI, Clinical Trials, PPI), "
            "with dual ML/LLM benchmarks."
        ),
        "license": "https://creativecommons.org/licenses/by-sa/4.0/",
        "url": "https://github.com/jang1563/NegBioDB",
        "version": "1.0.0",
        # NOTE(review): year-only value; schema.org accepts it, but a full
        # ISO date would be more precise — confirm the intended release date.
        "datePublished": "2026",
        "creator": {
            "@type": "sc:Person",
            "name": "Jungwon Jang",
        },
        "distribution": [],
        "recordSet": [],
    }

    # File objects (distribution): one entry per exported artifact.
    file_defs = [
        {
            "name": "dti_pairs",
            "contentUrl": "negbiodb_dti_pairs.parquet",
            "encodingFormat": "application/x-parquet",
            "description": "All negative DTI pairs with source, tier, and activity data",
        },
        {
            "name": "dti_m1_balanced",
            "contentUrl": "negbiodb_m1_balanced.parquet",
            "encodingFormat": "application/x-parquet",
            "description": "DTI M1 balanced benchmark dataset (1:1 positive:negative)",
        },
        {
            "name": "ct_pairs",
            "contentUrl": "ct/negbiodb_ct_pairs.parquet",
            "encodingFormat": "application/x-parquet",
            "description": "All clinical trial failure pairs",
        },
        {
            "name": "ppi_pairs",
            "contentUrl": "ppi/negbiodb_ppi_pairs.parquet",
            "encodingFormat": "application/x-parquet",
            "description": "All negative PPI pairs",
        },
        {
            "name": "llm_l1",
            "contentUrl": "llm_benchmarks/l1_mcq.jsonl",
            "encodingFormat": "application/jsonl",
            "description": "L1 MCQ classification benchmark",
        },
        {
            "name": "llm_l4",
            "contentUrl": "llm_benchmarks/l4_tested_untested.jsonl",
            "encodingFormat": "application/jsonl",
            "description": "L4 tested/untested discrimination benchmark",
        },
    ]
    for fd in file_defs:
        croissant["distribution"].append({
            "@type": "cr:FileObject",
            "name": fd["name"],
            "contentUrl": fd["contentUrl"],
            "encodingFormat": fd["encodingFormat"],
            "description": fd["description"],
        })

    # Record sets: key columns only, one record set per pairs table.
    record_defs = [
        {
            "name": "dti_pairs_record",
            "source": "dti_pairs",
            "fields": [
                {"name": "inchikey_connectivity", "dataType": "sc:Text"},
                {"name": "uniprot_id", "dataType": "sc:Text"},
                {"name": "activity_type", "dataType": "sc:Text"},
                {"name": "pchembl_value", "dataType": "sc:Float"},
                {"name": "source", "dataType": "sc:Text"},
                {"name": "tier", "dataType": "sc:Text"},
            ],
        },
        {
            "name": "ct_pairs_record",
            "source": "ct_pairs",
            "fields": [
                {"name": "nct_id", "dataType": "sc:Text"},
                {"name": "intervention_name", "dataType": "sc:Text"},
                {"name": "failure_category", "dataType": "sc:Text"},
                {"name": "tier", "dataType": "sc:Text"},
                {"name": "highest_phase_reached", "dataType": "sc:Text"},
            ],
        },
        {
            "name": "ppi_pairs_record",
            "source": "ppi_pairs",
            "fields": [
                {"name": "protein_a", "dataType": "sc:Text"},
                {"name": "protein_b", "dataType": "sc:Text"},
                {"name": "source", "dataType": "sc:Text"},
                {"name": "tier", "dataType": "sc:Text"},
            ],
        },
    ]
    for rd in record_defs:
        croissant["recordSet"].append({
            "@type": "cr:RecordSet",
            "name": rd["name"],
            "source": rd["source"],
            "field": [
                {
                    "@type": "cr:Field",
                    "name": f["name"],
                    "dataType": f["dataType"],
                    # Human-readable fallback description derived from the name.
                    "description": f["name"].replace("_", " "),
                }
                for f in rd["fields"]
            ],
        })

    return croissant


def main():
    """Write README.md and croissant.json into EXPORTS_DIR, then verify links."""
    # Ensure the output directory exists before writing into it.
    EXPORTS_DIR.mkdir(parents=True, exist_ok=True)

    # Generate README
    readme_text = generate_readme()
    readme_path = EXPORTS_DIR / "README.md"
    readme_path.write_text(readme_text, encoding="utf-8")
    print(f"Written: {readme_path}")
    print(f"  Lines: {len(readme_text.splitlines())}")

    # Generate Croissant
    croissant = generate_croissant()
    croissant_path = EXPORTS_DIR / "croissant.json"
    with open(croissant_path, "w", encoding="utf-8") as f:
        json.dump(croissant, f, indent=2)
    print(f"Written: {croissant_path}")

    # Verify: warn (do not fail) if any referenced file is missing.
    missing = []
    for dist in croissant["distribution"]:
        fpath = EXPORTS_DIR / dist["contentUrl"]
        if not fpath.exists():
            missing.append(dist["contentUrl"])
    if missing:
        print(f"\n WARNING: {len(missing)} referenced files not found:")
        for m in missing:
            print(f"   {m}")
    else:
        print("\n All referenced files exist.")


if __name__ == "__main__":
    main()