| """Build industry-level XBRL ontology + company-level extracted fields. |
| |
| Reads raw XBRL company facts (``data/xbrl/raw/{TICKER}.json``) and the |
| universe file to: |
| |
| 1. **Parse** all 10-K / 10-Q facts into a normalised table. |
| 2. **Group** by sector and industry (from ``company_info.csv``). |
| 3. **Classify** each tag per industry: |
| - **core** – appears in ≥70 % of companies in the industry |
| - **common** – appears in ≥30 % |
| - **extension** – appears in <30 % (often company-specific XBRL extensions) |
| 4. **Output**: |
| - ``data/xbrl/parsed/company_facts.parquet`` – all extracted facts |
| - ``data/xbrl/parsed/company_tags.parquet`` – per-company tag list (latest value) |
| - ``data/xbrl/ontology/industry_ontology.json`` – per-industry tag classification |
| - ``data/xbrl/ontology/tag_catalog.parquet`` – master tag catalog with labels |
| """ |

from __future__ import annotations

import json
import logging
from pathlib import Path

import pandas as pd

from . import config

logger = logging.getLogger(__name__)

_RAW_DIR = config.XBRL_DIR / "raw"
_PARSED_DIR = config.XBRL_DIR / "parsed"
_ONTOLOGY_DIR = config.XBRL_DIR / "ontology"


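# ---------------------------------------------------------------------------
# Parsing: raw company-facts JSON -> flat fact rows + tag metadata
# ---------------------------------------------------------------------------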
def _parse_single_company(
    ticker: str,
    path: Path,
    allowed_forms: set[str],
) -> tuple[list[dict], dict[str, dict]]:
    """Parse one company's raw XBRL JSON.

    Returns
    -------
    facts : list[dict]
        Flat rows of (ticker, taxonomy, tag, label, unit, period_start,
        period_end, value, form, fiscal_year, fiscal_period, filed,
        accession).
    tag_meta : dict[str, dict]
        ``{taxonomy:tag: {label, description, taxonomy}}``
    """
    try:
        raw = json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, UnicodeDecodeError):
        logger.warning("Corrupt JSON for %s, skipping", ticker)
        return [], {}

    if raw.get("_no_xbrl"):
        return [], {}

    facts_root = raw.get("facts", {})
    rows: list[dict] = []
    tag_meta: dict[str, dict] = {}

    for taxonomy, tags in facts_root.items():
        for tag_name, tag_data in tags.items():
            label = tag_data.get("label") or tag_name
            description = tag_data.get("description") or ""

            meta_key = f"{taxonomy}:{tag_name}"
            if meta_key not in tag_meta:
                tag_meta[meta_key] = {
                    "taxonomy": taxonomy,
                    "tag": tag_name,
                    "label": label,
                    "description": str(description),
                }

            units = tag_data.get("units", {})
            for unit_name, entries in units.items():
                for entry in entries:
                    form = entry.get("form", "")
                    if form not in allowed_forms:
                        continue

                    rows.append({
                        "ticker": ticker,
                        "taxonomy": taxonomy,
                        "tag": tag_name,
                        "label": label,
                        "unit": unit_name,
                        "period_start": entry.get("start"),
                        "period_end": entry.get("end"),
                        "value": entry.get("val"),
                        "form": form,
                        "fiscal_year": entry.get("fy"),
                        "fiscal_period": entry.get("fp"),
                        "filed": entry.get("filed"),
                        "accession": entry.get("accn", ""),
                    })

    return rows, tag_meta


def _parse_all_companies() -> tuple[pd.DataFrame, pd.DataFrame]:
    """Parse all raw XBRL JSON files.

    Returns ``(facts_df, tag_catalog_df)``.
    """
    if not _RAW_DIR.exists():
        raise FileNotFoundError(
            f"XBRL raw directory not found: {_RAW_DIR}. Run collect_filings first (Step 4)."
        )

    json_files = sorted(_RAW_DIR.glob("*.json"))
    if not json_files:
        raise FileNotFoundError("No XBRL JSON files found in " + str(_RAW_DIR))

    allowed_forms = set(config.XBRL_FORMS)
    all_rows: list[dict] = []
    all_meta: dict[str, dict] = {}

    for i, path in enumerate(json_files):
        ticker = path.stem
        rows, meta = _parse_single_company(ticker, path, allowed_forms)
        all_rows.extend(rows)
        all_meta.update(meta)

        if (i + 1) % 500 == 0:
            logger.info(" Parsed %d / %d companies (%d facts so far)",
                        i + 1, len(json_files), len(all_rows))

    logger.info(
        "Parsed %d companies → %d facts, %d unique tags",
        len(json_files), len(all_rows), len(all_meta),
    )

    facts_df = pd.DataFrame(all_rows)
    if not facts_df.empty:
        for col in ("period_start", "period_end", "filed"):
            facts_df[col] = pd.to_datetime(facts_df[col], errors="coerce")

    tag_catalog = pd.DataFrame(list(all_meta.values()))
    return facts_df, tag_catalog


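# ---------------------------------------------------------------------------
# Industry grouping and tag classification
# ---------------------------------------------------------------------------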
def _load_industry_map() -> dict[str, tuple[str, str]]:
    """Load ticker → (sector, industry) from company_info.csv."""
    ci_path = config.FUNDAMENTALS_DIR / "company_info.csv"
    if not ci_path.exists():
        logger.warning("company_info.csv not found; falling back to universe sectors")
        u_path = config.UNIVERSE_DIR / "benchmark_universe.csv"
        if not u_path.exists():
            return {}
        u = pd.read_csv(u_path)
        return {
            row["ticker"]: (str(row.get("sector", "Unknown")), "Unknown")
            for _, row in u.iterrows()
        }

    ci = pd.read_csv(ci_path)
    return {
        row["ticker"]: (
            str(row.get("sector", "Unknown")),
            str(row.get("industry", "Unknown")),
        )
        for _, row in ci.iterrows()
    }


def _build_ontology(
    facts_df: pd.DataFrame,
    industry_map: dict[str, tuple[str, str]],
) -> dict:
    """Build the industry-level ontology.

    Returns a nested dict::

        {
          "by_sector": {
            "Healthcare": {
              "company_count": 515,
              "tag_count": 1234,
              "tags": {
                "us-gaap:Revenue": {
                  "label": "Revenue",
                  "coverage": 0.95,
                  "classification": "core",
                  "median_value": 123456789
                },
                ...
              }
            },
            ...
          },
          "by_industry": {
            "Biotechnology": {
              "sector": "Healthcare",
              "company_count": 238,
              "tag_count": 567,
              "tags": { ... }
            },
            ...
          }
        }
    """
    if facts_df.empty:
        return {"by_sector": {}, "by_industry": {}}

    # Attach sector / industry labels to every fact row.
    facts_df = facts_df.copy()
    facts_df["sector"] = facts_df["ticker"].map(
        lambda t: industry_map.get(t, ("Unknown", "Unknown"))[0]
    )
    facts_df["industry"] = facts_df["ticker"].map(
        lambda t: industry_map.get(t, ("Unknown", "Unknown"))[1]
    )

    # Canonical tag key, e.g. 'us-gaap:Revenues'.
    facts_df["tag_key"] = facts_df["taxonomy"] + ":" + facts_df["tag"]

    core_thresh = config.XBRL_CORE_THRESHOLD
    common_thresh = config.XBRL_COMMON_THRESHOLD

    def _classify_tags(
        group_facts: pd.DataFrame,
        group_name: str,
    ) -> dict:
        """Classify tags within a group (sector or industry)."""
        company_count = group_facts["ticker"].nunique()
        if company_count == 0:
            return {
                "company_count": 0,
                "tag_count": 0,
                "tags": {},
            }

        # Number of distinct companies reporting each tag.
        tag_company_counts = (
            group_facts.groupby("tag_key")["ticker"]
            .nunique()
            .to_dict()
        )

        # Most frequent label per tag.
        lab_vc = group_facts.groupby(["tag_key", "label"]).size().reset_index(name="n")
        lab_idx = lab_vc.groupby("tag_key")["n"].idxmax()
        tag_labels = dict(
            zip(lab_vc.loc[lab_idx, "tag_key"].values, lab_vc.loc[lab_idx, "label"].values)
        )

        # Median numeric value per tag, taken over its latest fiscal year.
        val_numeric = pd.to_numeric(group_facts["value"], errors="coerce")
        gf = group_facts.assign(_vn=val_numeric).dropna(subset=["_vn"])
        median_values: dict = {}
        if not gf.empty:
            med_by_fy = gf.groupby(["tag_key", "fiscal_year"])["_vn"].median()
            latest_fy_series = gf.groupby("tag_key")["fiscal_year"].max()
            for tk, fy in latest_fy_series.items():
                if (tk, fy) in med_by_fy.index:
                    median_values[tk] = float(med_by_fy.loc[(tk, fy)])

        tags: dict[str, dict] = {}
        for tag_key, n_companies in tag_company_counts.items():
            coverage = n_companies / company_count
            if coverage >= core_thresh:
                classification = "core"
            elif coverage >= common_thresh:
                classification = "common"
            else:
                classification = "extension"

            tags[tag_key] = {
                "label": tag_labels.get(tag_key, ""),
                "company_count": int(n_companies),
                "coverage": round(coverage, 4),
                "classification": classification,
                "median_value": median_values.get(tag_key),
            }

        return {
            "company_count": int(company_count),
            "tag_count": len(tags),
            "tags": dict(sorted(
                tags.items(),
                key=lambda x: (-x[1]["coverage"], x[0]),
            )),
        }

    # Sector-level ontology.
    ontology_by_sector: dict = {}
    for sector, sector_facts in facts_df.groupby("sector"):
        logger.info(" Building ontology for sector: %s", sector)
        ontology_by_sector[sector] = _classify_tags(sector_facts, sector)

    # Industry-level ontology (each industry also records its parent sector).
    ontology_by_industry: dict = {}
    for industry, ind_facts in facts_df.groupby("industry"):
        sector = ind_facts["sector"].mode()
        sector_name = sector.iloc[0] if len(sector) > 0 else "Unknown"
        result = _classify_tags(ind_facts, industry)
        result["sector"] = sector_name
        ontology_by_industry[industry] = result

    return {
        "by_sector": ontology_by_sector,
        "by_industry": ontology_by_industry,
    }


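# ---------------------------------------------------------------------------
# Company-level extraction: latest value per tag
# ---------------------------------------------------------------------------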
def _build_company_tags(facts_df: pd.DataFrame) -> pd.DataFrame:
    """For each company, extract the latest value per tag.

    Returns a DataFrame with columns:
    ticker, taxonomy, tag, label, unit, value, fiscal_year, fiscal_period, filed
    """
    if facts_df.empty:
        return pd.DataFrame()

    idx = facts_df.groupby(["ticker", "taxonomy", "tag", "unit"])["filed"].idxmax()
    latest = facts_df.loc[idx].copy()
    latest = latest.sort_values(["ticker", "taxonomy", "tag"])
    return latest[
        ["ticker", "taxonomy", "tag", "label", "unit", "value",
         "fiscal_year", "fiscal_period", "filed"]
    ].reset_index(drop=True)


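# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------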
def run() -> dict[str, int]:
    """Build XBRL ontology from collected company facts.

    Returns a summary dict with counts: facts, unique_tags, companies,
    sectors, industries.
    """
    _PARSED_DIR.mkdir(parents=True, exist_ok=True)
    _ONTOLOGY_DIR.mkdir(parents=True, exist_ok=True)

    # Skip the rebuild if non-trivial outputs already exist (idempotent re-runs).
    ontology_path = _ONTOLOGY_DIR / "industry_ontology.json"
    facts_path = _PARSED_DIR / "company_facts.parquet"
    if (
        ontology_path.exists() and ontology_path.stat().st_size > 1000
        and facts_path.exists() and facts_path.stat().st_size > 1000
    ):
        ont = json.loads(ontology_path.read_text())

        # Guard against an ontology file that is not the expected dict layout.
        if isinstance(ont, dict):
            n_sectors = len(ont.get("by_sector", {}))
            n_industries = len(ont.get("by_industry", {}))
        else:
            n_sectors = 0
            n_industries = 0
        logger.info("Ontology already exists (%d sectors, %d industries). Skipping rebuild.",
                    n_sectors, n_industries)
        catalog_path = _ONTOLOGY_DIR / "tag_catalog.parquet"
        tags = pd.read_parquet(catalog_path) if catalog_path.exists() else pd.DataFrame()
        facts = pd.read_parquet(facts_path)
        return {
            "facts": len(facts),
            "unique_tags": len(tags),
            "companies": facts["ticker"].nunique() if "ticker" in facts.columns else 0,
            "sectors": n_sectors,
            "industries": n_industries,
        }

    # Parse all raw company-facts files.
    logger.info("Parsing raw XBRL company facts…")
    facts_df, tag_catalog = _parse_all_companies()

    # Persist the flat fact table.
    if not facts_df.empty:
        facts_df.to_parquet(facts_path, index=False)
        logger.info("Saved %d facts to %s", len(facts_df), facts_path)
    else:
        logger.warning("No facts parsed — empty output")
        return {"facts": 0, "unique_tags": 0, "companies": 0, "sectors": 0, "industries": 0}

    # Persist the master tag catalog (one row per taxonomy:tag with label/description).
    catalog_path = _ONTOLOGY_DIR / "tag_catalog.parquet"
    tag_catalog.to_parquet(catalog_path, index=False)
    logger.info("Saved %d unique tags to %s", len(tag_catalog), catalog_path)

    # Build and persist the per-sector / per-industry ontology.
    logger.info("Building industry ontology…")
    industry_map = _load_industry_map()
    ontology = _build_ontology(facts_df, industry_map)

    with open(ontology_path, "w", encoding="utf-8") as fh:
        json.dump(ontology, fh, indent=2, ensure_ascii=False, default=str)
    logger.info("Saved ontology to %s", ontology_path)

    # Per-company latest value for every tag.
    logger.info("Building company-level tag summaries…")
    company_tags = _build_company_tags(facts_df)
    company_tags_path = _PARSED_DIR / "company_tags.parquet"
    company_tags.to_parquet(company_tags_path, index=False)
    logger.info("Saved %d company-tag rows to %s", len(company_tags), company_tags_path)

    n_sectors = len(ontology.get("by_sector", {}))
    n_industries = len(ontology.get("by_industry", {}))

    summary = {
        "facts": len(facts_df),
        "unique_tags": len(tag_catalog),
        "companies": facts_df["ticker"].nunique(),
        "sectors": n_sectors,
        "industries": n_industries,
    }
    logger.info("Ontology build complete: %s", summary)

    # Log a per-sector breakdown of core / common / extension tags.
    for sector, data in sorted(ontology.get("by_sector", {}).items()):
        core = sum(1 for t in data["tags"].values() if t["classification"] == "core")
        common = sum(1 for t in data["tags"].values() if t["classification"] == "common")
        ext = sum(1 for t in data["tags"].values() if t["classification"] == "extension")
        logger.info(
            " %s: %d companies, %d tags (core=%d, common=%d, extension=%d)",
            sector, data["company_count"], data["tag_count"], core, common, ext,
        )

    return summary