# llm_recommendation_backend/data/catalog_loader.py
from __future__ import annotations
import hashlib
import json
from pathlib import Path
from typing import Dict, Tuple
import pandas as pd
from crawler.utils import canonicalize_url
def make_assessment_id(url: str) -> str:
    """Derive a stable hex ID for an assessment from its canonicalized URL.

    The URL is lowercased, canonicalized via the crawler helper, and the
    SHA-1 hex digest of the UTF-8 bytes is used as the identifier.
    """
    normalized = canonicalize_url(url.lower())
    digest = hashlib.sha1(normalized.encode("utf-8"))
    return digest.hexdigest()
def to_solutions_url(url: str) -> str:
    """Ensure outgoing URLs include the /solutions/ prefix for compatibility with labels/eval.

    Idempotent: a URL that already contains the /solutions/ prefix is returned
    unchanged. A bare str.replace would double the prefix here, because the
    search string "/products/product-catalog" is a substring of the
    replacement "/solutions/products/product-catalog".
    """
    if "/solutions/products/product-catalog" in url:
        return url
    return url.replace("/products/product-catalog", "/solutions/products/product-catalog")
def load_catalog(path: str) -> Tuple[pd.DataFrame, Dict[str, dict], Dict[str, str]]:
    """Load the assessment catalog and derive stable IDs plus URL lookup maps.

    Parameters
    ----------
    path : str
        Path to a ``.jsonl`` or ``.parquet``/``.pq`` catalog file that
        contains at least a ``url`` column.

    Returns
    -------
    Tuple of:
      * df — the catalog with added columns ``url_canonical``,
        ``assessment_id``, ``url_recommend`` (and ``duration_minutes``
        mirrored from ``duration`` when absent);
      * catalog_by_id — assessment_id -> full row as a dict;
      * id_by_url — maps both the /products/ and /solutions/products/
        URL variants to the assessment_id, so either form resolves.

    Raises
    ------
    FileNotFoundError
        If *path* does not exist.
    ValueError
        If the file extension is not a supported catalog format.
    """
    p = Path(path)
    if not p.exists():
        raise FileNotFoundError(f"Catalog file not found: {path}")
    if p.suffix == ".jsonl":
        df = pd.read_json(p, lines=True)
    elif p.suffix in {".parquet", ".pq"}:
        df = pd.read_parquet(p)
    else:
        raise ValueError(f"Unsupported catalog format: {p}")

    df["url_canonical"] = df["url"].apply(lambda u: canonicalize_url(str(u).lower()))
    df["assessment_id"] = df["url_canonical"].apply(make_assessment_id)
    df["url_recommend"] = df["url"].apply(to_solutions_url)

    # Mirror the legacy column name expected downstream.
    if "duration" in df.columns and "duration_minutes" not in df.columns:
        df["duration_minutes"] = df["duration"]

    # Boolean capability flags: missing values default to False.
    for col in ["remote_support", "adaptive_support"]:
        if col in df.columns:
            df[col] = df[col].fillna(False).astype(bool)

    # iterrows() always yields pandas Series, so to_dict() is always present;
    # the previous _asdict() fallback branch was dead code.
    catalog_by_id = {row["assessment_id"]: row.to_dict() for _, row in df.iterrows()}

    id_by_url: Dict[str, str] = {}
    for canonical, aid in zip(df["url_canonical"], df["assessment_id"]):
        id_by_url[canonical] = aid
        # Register both URL variants so lookups succeed with either form.
        alt_products = canonical.replace("/solutions/products/product-catalog", "/products/product-catalog")
        # Guard: "/products/product-catalog" is a substring of the /solutions/
        # replacement, so a bare replace on an already-prefixed URL would
        # register a bogus doubled "/solutions/solutions/..." key.
        if "/solutions/products/product-catalog" in canonical:
            alt_solutions = canonical
        else:
            alt_solutions = canonical.replace("/products/product-catalog", "/solutions/products/product-catalog")
        id_by_url.setdefault(alt_products, aid)
        id_by_url.setdefault(alt_solutions, aid)
    return df, catalog_by_id, id_by_url
def save_catalog_with_ids(input_path: str, output_path: str) -> None:
    """Load a catalog, enrich it with derived ID columns, and persist it.

    The output format follows the output path's extension: ``.jsonl`` is
    written as line-delimited JSON, anything else as Parquet.
    """
    enriched, _, _ = load_catalog(input_path)
    destination = Path(output_path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    if output_path.endswith(".jsonl"):
        enriched.to_json(output_path, orient="records", lines=True)
    else:
        enriched.to_parquet(output_path, index=False)
if __name__ == "__main__":
    # CLI entry point: enrich a catalog file with derived IDs and save it.
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--input", required=True)
    cli.add_argument("--output", required=True)
    ns = cli.parse_args()
    save_catalog_with_ids(ns.input, ns.output)