# rag-mixedbread / prepare_cve_corpus.py
# Provenance (Hugging Face upload metadata): uploaded by Kushalkhemka,
# "Upload rag_mixedbread scripts and artifacts", commit c980379 (verified).
#!/usr/bin/env python3
"""
Normalize CVE JSON files into chunked text documents for downstream embeddings.
"""
from __future__ import annotations
import argparse
import json
import textwrap
from pathlib import Path
from typing import Dict, Iterable, List, Sequence, Tuple
from .config import load_settings
def parse_args() -> argparse.Namespace:
    """Parse command-line options for corpus preparation.

    Path defaults are taken from the project settings; chunking defaults
    are 700 characters per chunk with a 120-character overlap.
    """
    settings = load_settings()
    parser = argparse.ArgumentParser(description="Prepare CVE corpus.")
    option_specs = (
        (
            "--cve-root",
            {
                "default": str(settings.cve_root),
                "help": "Directory containing extracted CVE JSON files.",
            },
        ),
        (
            "--output",
            {
                "default": str(settings.corpus_path),
                "help": "Path to write the normalized JSONL corpus.",
            },
        ),
        (
            "--chunk-size",
            {
                "type": int,
                "default": 700,
                "help": "Maximum number of characters per chunk.",
            },
        ),
        (
            "--chunk-overlap",
            {
                "type": int,
                "default": 120,
                "help": "Character overlap between consecutive chunks.",
            },
        ),
    )
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def chunk_text(text: str, chunk_size: int, overlap: int) -> List[str]:
    """Split *text* into chunks of at most *chunk_size* characters.

    Whitespace runs are collapsed to single spaces before chunking.
    Consecutive chunks share *overlap* trailing/leading characters so
    that sentences straddling a boundary appear in both chunks.

    Args:
        text: Source text; empty/None-ish text yields an empty list.
        chunk_size: Maximum characters per chunk; must be positive.
        overlap: Characters shared between consecutive chunks; must be
            in [0, chunk_size) or the window could never advance.

    Returns:
        List of chunk strings covering the normalized text in order.

    Raises:
        ValueError: On non-positive chunk_size or overlap outside
            [0, chunk_size). (The original code looped forever when
            overlap >= chunk_size; this is now rejected up front.)
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    if overlap < 0 or overlap >= chunk_size:
        raise ValueError("overlap must be in [0, chunk_size)")
    if not text:
        return []
    text = " ".join(text.split())  # normalize whitespace
    chunks: List[str] = []
    start = 0
    length = len(text)
    while start < length:
        end = min(length, start + chunk_size)
        chunks.append(text[start:end])
        if end == length:
            break
        # Guaranteed to advance: end == start + chunk_size > start + overlap.
        start = end - overlap
    return chunks
def load_json(path: Path):
    """Read and parse the UTF-8 encoded JSON file at *path*."""
    return json.loads(path.read_text(encoding="utf-8"))
def iter_raw_entries(path: Path) -> Iterable[Dict]:
    """Yield individual raw CVE records contained in *path*.

    A file may hold an NVD-style feed (dict with a "CVE_Items" list),
    a single record dict, or a bare JSON list of records.
    """
    data = load_json(path)
    if isinstance(data, list):
        yield from data
    elif isinstance(data, dict):
        if "CVE_Items" in data:
            yield from data["CVE_Items"]
        else:
            # Single record (with or without a nested "cve" key).
            yield data
def select_lang_values(items: Sequence[Dict], prefer_lang: str = "en") -> List[str]:
    """Collect non-empty, stripped "value" strings from *items*.

    Entries whose "lang" matches *prefer_lang* win; when none match,
    values in any language are returned instead.
    """

    def _collect(lang_filter) -> List[str]:
        return [
            entry.get("value", "").strip()
            for entry in items
            if entry.get("value") and lang_filter(entry)
        ]

    preferred = _collect(lambda entry: entry.get("lang") == prefer_lang)
    return preferred if preferred else _collect(lambda entry: True)
def dedupe(seq: Sequence[str]) -> List[str]:
    """Return *seq* with falsy entries and duplicates removed, order kept."""
    unique: List[str] = []
    seen: set = set()
    for value in seq:
        if value and value not in seen:
            seen.add(value)
            unique.append(value)
    return unique
def extract_cvss(metrics: Sequence[Dict]) -> Tuple[float | None, str | None, str | None]:
    """Pick the highest CVSS base score found across *metrics*.

    Scans every metric entry for known CVSS payload keys and keeps the
    payload with the largest baseScore, regardless of CVSS version.

    Args:
        metrics: The "metrics" list from a CVE record's CNA container.

    Returns:
        (base score, vector string, severity label) of the best payload,
        or (None, None, None) when no scored payload exists.
    """
    best_score = None
    best_vector = None
    best_severity = None
    for entry in metrics:
        # cvssV4_0 added: records under the CVE 5.1 schema may carry only
        # a CVSS v4 payload, which the original key list silently skipped.
        for key in ("cvssV4_0", "cvssV3_1", "cvssV3_0", "cvssV2_0"):
            payload = entry.get(key)
            if payload is None:
                continue
            score = payload.get("baseScore")
            if score is None:
                continue
            if best_score is None or score > best_score:
                best_score = score
                best_vector = payload.get("vectorString")
                best_severity = payload.get("baseSeverity")
    return best_score, best_vector, best_severity
def extract_problem_types(problem_types: Sequence[Dict]) -> List[str]:
    """Flatten problemTypes into labels (CWE id preferred), deduplicated."""
    labels = [
        desc.get("cweId") or desc.get("description")
        for entry in problem_types
        for desc in entry.get("descriptions", [])
    ]
    # dedupe() also drops falsy labels, matching the original filter.
    return dedupe(labels)
def summarize_affected(affected: Sequence[Dict]) -> Tuple[List[str], List[str]]:
    """Collect unique vendor and product names from the affected list."""
    vendor_names = [entry.get("vendor") for entry in affected]
    product_names = [entry.get("product") for entry in affected]
    # dedupe() skips falsy entries, so missing vendor/product keys drop out.
    return dedupe(vendor_names), dedupe(product_names)
def extract_v5_fields(raw: Dict) -> Dict:
    """Normalize a CVE JSON 5.x record into the flat corpus schema.

    Pulls the CNA container's descriptions, references, problem types,
    affected vendors/products and best CVSS score, plus any ADP analyst
    notes, and renders them into a single plain-text document.

    Args:
        raw: Parsed CVE 5.x record ("cveMetadata" + "containers" keys);
            the caller is expected to have set raw["_source_file"].

    Returns:
        Dict with the metadata fields plus "text" (rendered body) and
        "source_file".
    """
    meta = raw.get("cveMetadata", {})
    cna = raw.get("containers", {}).get("cna", {})
    # English descriptions preferred; falls back to any language.
    descriptions = select_lang_values(cna.get("descriptions", []))
    description = " ".join(descriptions).strip()
    references = [
        ref.get("url")
        for ref in cna.get("references", [])
        if ref.get("url")
    ]
    problem_types = extract_problem_types(cna.get("problemTypes", []))
    # First CWE-prefixed label (if any) becomes the primary CWE tag.
    cwe = next((label for label in problem_types if label.startswith("CWE-")), None)
    vendors, products = summarize_affected(cna.get("affected", []))
    cvss_score, cvss_vector, severity = extract_cvss(cna.get("metrics", []))
    published = meta.get("datePublished")
    last_modified = meta.get("dateUpdated")
    adp_text: List[str] = []
    # ADP containers hold third-party enrichment; only titled ones are kept.
    for container in raw.get("containers", {}).get("adp", []):
        title = container.get("title")
        notes = select_lang_values(container.get("descriptions", []))
        if title:
            adp_text.append(f"{title}: {' '.join(notes) if notes else ''}".strip())
    # Assemble the plain-text body, one header line per known field.
    body_parts = [
        f"CVE ID: {meta.get('cveId', 'UNKNOWN')}",
        f"State: {meta.get('state', 'N/A')}",
        f"Published: {published or 'N/A'}",
        f"Last Updated: {last_modified or 'N/A'}",
    ]
    # NOTE(review): a baseScore of 0.0 is falsy and would skip this line
    # when severity is also missing — presumably acceptable; confirm.
    if severity or cvss_score:
        body_parts.append(
            f"Severity: {severity or 'N/A'} (CVSS {cvss_score or 'N/A'} {cvss_vector or ''})".strip()
        )
    if vendors:
        body_parts.append(f"Vendors: {', '.join(vendors)}")
    if products:
        body_parts.append(f"Products: {', '.join(products)}")
    if problem_types:
        body_parts.append(f"Problem Types: {', '.join(problem_types)}")
    body_parts.extend(
        [
            "",
            "Description:",
            description or "No description available.",
        ]
    )
    if adp_text:
        body_parts.append("")
        body_parts.append("Additional Analyst Notes:")
        for note in adp_text:
            body_parts.append(f"- {note}")
    if references:
        body_parts.append("")
        body_parts.append("References:")
        for ref in references:
            body_parts.append(f"- {ref}")
    flat_text = "\n".join(textwrap.dedent(part).strip() for part in body_parts if part is not None)
    return {
        "cve_id": meta.get("cveId", "UNKNOWN"),
        "cwe": cwe,
        "problem_types": problem_types,
        "published": published,
        "last_modified": last_modified,
        "severity": severity,
        "cvss_score": cvss_score,
        "cvss_vector": cvss_vector,
        "vendors": vendors,
        "products": products,
        "references": references,
        "text": flat_text,
        "source_file": str(raw.get("_source_file", "unknown")),
    }
def extract_legacy_fields(raw: Dict) -> Dict:
    """Normalize a legacy (pre-5.x / NVD 1.x style) CVE record.

    Handles both the NVD feed shape ({"cve": {...}}) and looser ad-hoc
    shapes that carry top-level "description"/"references" keys.

    Args:
        raw: Parsed legacy CVE record; the caller is expected to have
            set raw["_source_file"].

    Returns:
        Dict with the same flat schema as extract_v5_fields; CVSS,
        vendor and product fields are left empty because legacy feeds
        are not mined for them here.
    """
    cve_id = (
        raw.get("cve", {})
        .get("CVE_data_meta", {})
        .get("ID")
        or raw.get("cve_id")
        or raw.get("id")
    )
    description = ""
    if "cve" in raw:
        desc_data = raw["cve"].get("description", {}).get("description_data", [])
        description = " ".join(item.get("value", "") for item in desc_data)
    elif "description" in raw:
        if isinstance(raw["description"], dict):
            # Fix: description_data is a list of {"lang","value"} dicts in
            # the NVD shape; the old code assigned the list itself, which
            # later crashed textwrap.dedent. Join the values instead.
            desc_data = raw["description"].get("description_data", "")
            if isinstance(desc_data, list):
                description = " ".join(
                    item.get("value", "")
                    for item in desc_data
                    if isinstance(item, dict)
                )
            else:
                description = str(desc_data)
        else:
            description = str(raw["description"])
    references = []
    if "cve" in raw:
        refs = raw["cve"].get("references", {}).get("reference_data", [])
        references = [ref.get("url") for ref in refs if ref.get("url")]
    elif "references" in raw and isinstance(raw["references"], list):
        # Fix: drop dict refs without a "url" so None never reaches the
        # metadata or renders as a literal "- None" line.
        references = [
            url
            for url in (
                ref if isinstance(ref, str) else ref.get("url")
                for ref in raw["references"]
            )
            if url
        ]
    cwe = None
    problemtype = raw.get("cve", {}).get("problemtype", {}).get("problemtype_data", [])
    if problemtype:
        descriptions = problemtype[0].get("description", [])
        if descriptions:
            cwe = descriptions[0].get("value")
    published = raw.get("publishedDate") or raw.get("published")
    last_modified = raw.get("lastModifiedDate") or raw.get("last_modified")
    body_parts = [
        f"CVE ID: {cve_id or 'UNKNOWN'}",
        f"CWE: {cwe or 'N/A'}",
        f"Published: {published or 'N/A'}",
        f"Last Modified: {last_modified or 'N/A'}",
        "",
        "Description:",
        description or "No description available.",
    ]
    if references:
        body_parts.append("")
        body_parts.append("References:")
        for ref in references:
            body_parts.append(f"- {ref}")
    flat_text = "\n".join(textwrap.dedent(part).strip() for part in body_parts)
    return {
        "cve_id": cve_id or "UNKNOWN",
        "cwe": cwe,
        "problem_types": [cwe] if cwe else [],
        "published": published,
        "last_modified": last_modified,
        "severity": None,
        "cvss_score": None,
        "cvss_vector": None,
        "vendors": [],
        "products": [],
        "references": references,
        "text": flat_text,
        "source_file": str(raw.get("_source_file", "unknown")),
    }
def normalize_entry(raw: Dict) -> Dict:
    """Dispatch *raw* to the CVE 5.x or legacy normalizer by shape."""
    is_v5_record = "containers" in raw and "cveMetadata" in raw
    return extract_v5_fields(raw) if is_v5_record else extract_legacy_fields(raw)
def prepare_entries(cve_root: Path) -> Iterable[Dict]:
    """Yield normalized entries for every *.json under *cve_root*.

    Files are processed in sorted order for deterministic output.
    """
    for json_path in sorted(cve_root.rglob("*.json")):
        for raw in iter_raw_entries(json_path):
            raw["_source_file"] = json_path  # record provenance for output
            yield normalize_entry(raw)
def write_corpus(
    entries: Iterable[Dict], output_path: Path, chunk_size: int, overlap: int
) -> Tuple[int, int]:
    """Chunk each entry's text and write one JSONL record per chunk.

    Creates the output directory if needed.

    Returns:
        (number of entries processed, number of chunks written).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    doc_count = 0
    chunk_count = 0
    with output_path.open("w", encoding="utf-8") as out:
        for entry in entries:
            doc_count += 1
            # Metadata is identical for every chunk of the same entry.
            metadata = {
                "cwe": entry["cwe"],
                "problem_types": entry.get("problem_types"),
                "published": entry["published"],
                "last_modified": entry["last_modified"],
                "severity": entry.get("severity"),
                "cvss_score": entry.get("cvss_score"),
                "cvss_vector": entry.get("cvss_vector"),
                "vendors": entry.get("vendors"),
                "products": entry.get("products"),
                "references": entry["references"],
                "source_file": entry["source_file"],
            }
            for idx, piece in enumerate(chunk_text(entry["text"], chunk_size, overlap)):
                chunk_count += 1
                record = {
                    "cve_id": entry["cve_id"],
                    "chunk_id": idx,
                    "text": piece,
                    "metadata": metadata,
                }
                out.write(json.dumps(record) + "\n")
    return doc_count, chunk_count
def main() -> None:
    """CLI entry point: validate the input root, build and report the corpus."""
    args = parse_args()
    cve_root = Path(args.cve_root)
    if not cve_root.exists():
        raise FileNotFoundError(
            f"CVE root {cve_root} not found. Run scripts/unzip_cvelist.py first."
        )
    docs, chunks = write_corpus(
        prepare_entries(cve_root),
        Path(args.output),
        args.chunk_size,
        args.chunk_overlap,
    )
    print(f"Wrote {chunks} chunks from {docs} CVE entries to {args.output}")
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()