| | |
| | """Expose inferred workflow labels for APEX-Agents tasks. |
| | |
| | The public `tasks_and_rubrics.json` includes `domain` metadata but omits |
| | workflow/sub-task labels. This script infers per-domain workflows from task |
| | text and task file names, then applies quota-constrained assignment so each |
| | domain matches the workflow distribution reported in Table 6 of the paper. |
| | """ |
| |
|
| | from __future__ import annotations |
| |
|
| | import argparse |
| | import csv |
| | import json |
| | import math |
| | import re |
| | from collections import Counter, defaultdict |
| | from pathlib import Path |
| | from typing import DefaultDict |
| |
|
| | import numpy as np |
| | from scipy.optimize import linear_sum_assignment |
| |
|
| |
|
# Target workflow counts per domain, as reported in Table 6 of the paper
# (see module docstring).  For each domain the counts must sum to the number
# of tasks in that domain; `_quota_assign` raises ValueError otherwise.
DOMAIN_WORKFLOW_QUOTAS = {
    "Investment Banking": {
        "Comparables": 16,
        "DCF": 42,
        "Debt Model": 6,
        "LBO": 12,
        "Market / Sector Research": 3,
        "Merger Model": 7,
        "Sensitivity Analysis": 46,
        "Valuation Analysis": 28,
    },
    "Management Consulting": {
        "Benchmarking / Competitive Analysis": 26,
        "Cost Benefit Analysis": 11,
        "Market Sizing, TAM, SAM": 14,
        "Operations Analysis": 23,
        "Scenario/Sensitivity Analysis": 35,
        "Strategy Recommendations": 5,
        "Survey / Interview Analysis": 31,
        "Variance / Performance Analysis": 15,
    },
    "Law": {
        "Compliance Program Review": 16,
        "Contract Review": 30,
        "Due Diligence": 18,
        "Internal Investigations": 3,
        "Legal Research": 47,
        "Litigation Strategy": 8,
        "Motion Drafting": 6,
        "Risk Assessment": 24,
        "Other": 8,
    },
}
| |
|
| |
|
# Baseline per-workflow scores used to seed `_score_task` before any regex
# signals are added.  They give every workflow a small tie-breaking
# preference so that tasks with few or no signal matches still have a
# well-defined ordering in the quota-constrained assignment.
DOMAIN_BASE_PRIORS = {
    "Investment Banking": {
        "Comparables": 0.12,
        "DCF": 0.25,
        "Debt Model": 0.05,
        "LBO": 0.12,
        "Market / Sector Research": 0.05,
        "Merger Model": 0.07,
        "Sensitivity Analysis": 0.22,
        "Valuation Analysis": 0.18,
    },
    "Management Consulting": {
        "Benchmarking / Competitive Analysis": 0.18,
        "Cost Benefit Analysis": 0.08,
        "Market Sizing, TAM, SAM": 0.12,
        "Operations Analysis": 0.16,
        "Scenario/Sensitivity Analysis": 0.2,
        "Strategy Recommendations": 0.06,
        "Survey / Interview Analysis": 0.15,
        "Variance / Performance Analysis": 0.12,
    },
    "Law": {
        "Compliance Program Review": 0.1,
        "Contract Review": 0.2,
        "Due Diligence": 0.1,
        "Internal Investigations": 0.1,
        "Legal Research": 0.2,
        "Litigation Strategy": 0.1,
        "Motion Drafting": 0.1,
        "Risk Assessment": 0.2,
        "Other": 0.1,
    },
}
| |
|
| |
|
| | |
# Keyword-signal rules: domain -> workflow -> list of (name, regex, weight).
# `_score_task` adds `weight` to the workflow's score whenever `regex`
# matches the task's combined text (matched case-insensitively), and records
# `name` as the human-readable reason shown in the audit CSV.  Weights are
# hand-tuned: roughly, >4 = near-definitive phrase, 2-4 = strong hint,
# <2 = weak corroborating vocabulary.
DOMAIN_SIGNALS = {
    "Investment Banking": {
        "Comparables": [
            ("comparables_term", r"\bcomparables?\b|\bcomps?\b|\bpublic comparables?\b", 4.2),
            ("precedent_transactions", r"\bprecedent transactions?\b|\bprecedents?\b", 3.2),
            ("peer_group", r"\bpeer group\b|\bpeer set\b|\bpeer analysis\b", 2.6),
            ("multiple_focus", r"\bev/?ebitda\b|\bev/?ebit\b|\bp/?e\b|\bev/?fcf\b|\btrading multiples?\b", 1.8),
        ],
        "DCF": [
            ("dcf_term", r"\bdcf\b|\bdiscounted cash flow\b", 4.6),
            ("wacc_terminal", r"\bwacc\b|\bterminal value\b|\bterminal growth\b|\bperpetuity\b", 2.7),
            ("discount_build", r"\bcost of equity\b|\brisk[- ]free rate\b|\bbeta\b|\bequity risk premium\b", 2.3),
            ("discounted_fcf", r"\bdiscounted free cash flow\b|\b(un)?levered free cash flow\b|\bpv of free cash flows?\b", 2.2),
        ],
        "Debt Model": [
            ("debt_model_term", r"\bdebt model\b|\bdebt schedule\b", 4.8),
            ("debt_instruments", r"\brevolver\b|\bterm loan\b|\bdebenture\b|\bcoupon\b|\bamortization\b|\bprincipal\b", 3.0),
            ("debt_metrics", r"\binterest coverage\b|\bleverage ratio\b|\bnet debt\b|\bdebt capacity\b", 2.4),
        ],
        "LBO": [
            ("lbo_term", r"\blbo\b|\bleveraged buyout\b", 4.8),
            ("returns_terms", r"\birr\b|\bmoic\b|\bsponsor equity\b", 3.5),
            ("entry_exit_terms", r"\bentry multiple\b|\bexit multiple\b|\bpremium paid\b|\bability to pay\b", 2.2),
        ],
        "Market / Sector Research": [
            ("market_sector_research_term", r"\bmarket (?:or )?sector research\b|\bsector research\b", 4.2),
            ("industry_outlook", r"\bindustry outlook\b|\bsector outlook\b|\bmarket outlook\b", 3.1),
            ("addressable_market", r"\baddressable market\b|\btam\b|\bsam\b", 2.0),
        ],
        "Merger Model": [
            ("merger_model_term", r"\bmerger model\b|\baccretion dilution model\b", 4.5),
            ("accretion_dilution", r"\baccretion\b|\bdilution\b", 3.6),
            ("proforma_exchange", r"\bpro[\s-]?forma\b|\bexchange ratio\b|\bcombined company\b", 3.0),
            ("consideration_mix", r"\bcash consideration\b|\bstock consideration\b|\bstock portion\b", 2.1),
        ],
        "Sensitivity Analysis": [
            ("sensitivity_term", r"\bsensitivity\b|\bscenario\b", 3.4),
            ("scenario_cases", r"\bupside\b|\bdownside\b|\blow case\b|\bmid case\b|\bhigh case\b", 2.7),
            ("shock_flex", r"\bshock\b|\bstep-?up\b|\bstep-?down\b|\bflex(?:ing)?\b|\bcritical point\b", 2.6),
            ("assumption_change", r"\bassuming\b|\badjust\b|\bwhat if\b", 1.4),
        ],
        "Valuation Analysis": [
            ("valuation_term", r"\bvaluation\b|\bfair value\b", 2.9),
            ("implied_value", r"\bimplied share price\b|\benterprise value\b|\bprice per share\b|\boffer price\b", 2.6),
            ("premium_discount", r"\bpremium\b|\bdiscount\b|\bimplied upside/downside\b", 2.0),
            ("npv_present_value", r"\bnpv\b|\bnet present value\b|\bpresent value\b", 1.8),
        ],
    },
    "Management Consulting": {
        "Benchmarking / Competitive Analysis": [
            ("benchmarking_term", r"\bbenchmark(?:ing)?\b", 3.7),
            ("competitive_term", r"\bcompetitive\b|\bcompetitor(?:s)?\b|\bpeer(?:s)?\b|\blandscape\b", 3.0),
            ("ranking_compare", r"\brank(?:ing)?\b|\bcompare\b|\bversus\b|\bagainst\b", 1.9),
            ("market_share_compare", r"\bmarket share\b", 1.7),
        ],
        "Cost Benefit Analysis": [
            ("cost_benefit_term", r"\bcost[- ]benefit\b|\bcost benefit\b", 4.8),
            ("payback_roi", r"\bpayback period\b|\broi\b", 3.3),
            ("npv_term", r"\bnpv\b|\bnet present value\b", 3.1),
            ("savings_investment", r"\btotal savings\b|\bone-time investment\b|\bannual benefit\b|\bprofit opportunity\b", 2.2),
        ],
        "Market Sizing, TAM, SAM": [
            ("market_sizing_term", r"\bmarket sizing\b|\bmarket size\b", 4.1),
            ("tam_sam_som", r"\btam\b|\bsam\b|\bsom\b|\baddressable market\b", 4.6),
            ("implied_share_size", r"\bimplied market share\b|\btotal market size\b", 2.2),
        ],
        "Operations Analysis": [
            ("operations_term", r"\boperations?\b|\boperational\b|\bproductivity\b|\bworkforce\b|\bstaffing\b|\bheadcount\b", 2.8),
            ("plant_asset_maintenance", r"\bplant\b|\bequipment\b|\bmaintenance\b|\bdowntime\b|\bscrap\b|\byield\b|\basset\b", 2.5),
            ("throughput_capacity", r"\bcapacity\b|\butilization\b|\bthroughput\b|\bspan of control\b", 2.0),
            ("regression_term", r"\bregression\b|\bcorrelated\b", 1.8),
        ],
        "Scenario/Sensitivity Analysis": [
            ("scenario_term", r"\bscenario\b|\bsensitivity\b", 3.8),
            ("case_terms", r"\blow case\b|\bmid case\b|\bhigh case\b|\bstress case\b", 2.8),
            ("assumption_shifts", r"\bassuming\b|\bwhat if\b|\badjust(?:ed|ment)\b|\bchange in\b", 1.5),
            ("if_then", r"\bif\b.{0,35}\bthen\b", 1.6),
        ],
        "Strategy Recommendations": [
            ("recommend_term", r"\brecommend(?:ation|ed)?\b|\bshould proceed\b|\bgo/no-go\b", 4.8),
            ("strategic_option", r"\bstrategic option\b|\brecommended path\b|\bdecision score\b", 2.8),
        ],
        "Survey / Interview Analysis": [
            ("survey_term", r"\bsurvey\b|\bquestionnaire\b|\brespondents?\b|\bresponse dataset\b", 4.0),
            ("interview_term", r"\binterview\b|\bcall summary\b|\bcohort\b|\bsentiment\b", 3.0),
            ("satisfaction_intent", r"\bsatisfaction\b|\bintent\b|\bpreferences?\b", 1.7),
        ],
        "Variance / Performance Analysis": [
            ("variance_term", r"\bvariance\b|\bperformance analysis\b", 4.2),
            ("gap_delta", r"\bgap\b|\bdelta\b|\bdifference\b|\bvs\.?\b|\brelative to\b", 2.4),
            ("target_attainment", r"\btarget\b|\bover[- ]?perform\b|\bunder[- ]?perform\b|\battainment\b", 2.2),
            ("pp_change", r"\bpercentage points?\b|\b% change\b", 2.0),
        ],
    },
    "Law": {
        "Compliance Program Review": [
            ("compliance_review", r"\bcompliance review\b", 4.0),
            ("compliance_program", r"\bcompliance program\b", 3.0),
            ("policy_or_procedure", r"\bpolic(?:y|ies)\b|\bprocedures?\b|\bprotocols?\b", 1.9),
            ("regulatory_compliance", r"\bregulatory compliance\b|\bin compliance with\b", 2.0),
            ("framework_or_controls", r"\bframework\b|\bcontrols?\b", 1.6),
            ("audit_or_supervision", r"\baudit\b|\bmra\b|\bcfpb\b|\bsupervision and examination\b", 1.8),
            ("notice_obligation", r"\bnotification requirements?\b|\bbreach and incident response policy\b", 1.4),
            ("sec_disclosure_controls", r"\b8-k\b|\bform 8-k\b|\bsec\b|\breg fd\b|\brule 10b-5\b", 2.1),
            ("facility_fire_safety", r"\bfire safety\b|\binspection report\b|\bexit signage\b", 1.7),
        ],
        "Contract Review": [
            ("agreement_or_contract", r"\bagreement\b|\bcontract\b|\bclause\b|\bterms?\b", 1.5),
            ("specific_agreement_type", r"\bmaster supply agreement\b|\bmsa\b|\blease\b|\boperating agreement\b|\bcharter party\b|\bjv agreement\b", 2.3),
            ("force_majeure", r"\bforce majeure\b", 2.8),
            ("execution_or_amendment", r"\bexecuted\b|\bamend(?:ed|ment)\b|\brevise\b|\bredline\b", 1.3),
            ("validity_or_notice", r"\bvalid\b.{0,35}\bnotice\b|\bcommencement date\b", 1.6),
            ("section_by_section", r"\barticles?\b\s+\d|\bsection[s]?\b\s+\d", 1.2),
        ],
        "Due Diligence": [
            ("due_diligence_phrase", r"\bdue diligence\b|\bdiligence file\b|\bdiligence memo\b", 4.8),
            ("transaction_context", r"\bacquisition\b|\btransaction\b|\bpurchaser\b|\bseller\b|\bpost-?closing\b|\bpre-?closing\b", 2.4),
            ("deal_docs", r"\bshare purchase agreement\b|\bstock purchase agreement\b|\bspa\b|\bindemnities?\b|\brepresentations?\b", 2.0),
            ("diligence_review", r"\breview\b.{0,35}\bdiligence\b|\bdiligence\b.{0,35}\breview\b", 2.5),
            ("closing_checklist", r"\bclosing checklist\b|\bchecklist\b", 1.4),
            ("regulatory_deal_filing", r"\bhsr\b|\bfiling submission\b|\bpremerger\b", 1.8),
        ],
        "Internal Investigations": [
            ("internal_investigation", r"\binternal investigation\b|\bincident investigation\b|\boutage investigation\b", 4.6),
            ("incident_postmortem", r"\bincident report\b|\bpostmortem\b|\broot cause\b|\btimeline\b|\bevent logs?\b", 3.4),
            ("email_chain", r"\bemail chain\b|\bemail exchange\b", 2.2),
            ("outage_findings", r"\boutage\b.{0,35}\binvestigation\b|\bindependent investigation\b", 2.4),
            ("forensic_review", r"\bforensic\b|\binterrogatories\b|\bcivil investigative demand\b", 1.6),
        ],
        "Legal Research": [
            ("law_or_statute_question", r"\bunder\b.{0,45}\b(?:law|code|act|rule|regulation|statute)\b", 2.1),
            ("citation_request", r"\bcite\b|\brelevant section\b|\bwhat (?:does|is)\b|\bwhich section\b", 1.7),
            ("authority_tokens", r"\b\d+\s*u\.?s\.?c\.?\b|\b\d+\s*c\.?f\.?r\.?\b|\bfrcp\b|\bgdpr\b|\barticle \d+\b|\bncac\b|\bplanning act\b", 2.3),
            ("cases_and_courts", r"\bcase law\b|\bcourt\b|\bprecedent\b|\bholding\b|\bopinion\b", 1.9),
            ("requirements_question", r"\bdoes\b.{0,40}\brequire\b|\bis .* legal\b", 1.4),
        ],
        "Litigation Strategy": [
            ("likelihood_success", r"\blikelihood of success\b|\bchance of success\b|\bsurviv(?:e|ing)\b.{0,40}\bsummary judgment\b", 4.2),
            ("claims_defenses", r"\bclaims?\b|\bdefenses?\b|\bcounterclaims?\b|\bstrongest argument\b", 2.3),
            ("forum_and_venue", r"\bvenue\b|\bjurisdiction\b|\barbitration\b|\bpre-?trial\b|\bsettlement\b", 1.9),
            ("dismissal_strategy", r"\bmotion to dismiss\b|\brule 12\b|\bstrategy\b", 1.9),
        ],
        "Motion Drafting": [
            ("draft_motion_material", r"\bdraft\b.{0,55}\b(?:motion|complaint|brief|memorandum|memo|outline)\b", 4.4),
            ("prepare_motion_material", r"\bprepare\b.{0,55}\b(?:motion|brief|memorandum|outline)\b", 3.9),
            ("new_doc_litigation", r"\bpre-?litigation legal memorandum\b|\bsummary judgment\b", 2.7),
            ("edit_litigation_doc", r"\bedit existing\b.{0,35}\b(?:agreement|complaint|motion)\b", 1.8),
        ],
        "Risk Assessment": [
            ("risk_or_exposure", r"\brisk\b|\bexposure\b|\bfinancial exposure\b", 2.0),
            ("liability_penalty", r"\bliability\b|\bfine\b|\bpenalty\b|\bdamages?\b|\brefund\b", 1.8),
            ("max_amount", r"\bmaximum\b.{0,35}\b(?:liability|fine|penalty|refund|exposure)\b", 2.4),
            ("amount_question", r"\bhow much\b|\bwhat amount\b|\bpotential\b", 1.2),
            ("risk_matrix", r"\bheat map\b|\bmitigation\b", 1.6),
            ("defect_or_fault", r"\bfaulty\b|\bdefect(?:ive)?\b|\boutage\b", 1.2),
        ],
        "Other": [
            ("spreadsheet_output", r"\bmake_new_sheet\b|\bedit_existing_sheet\b", 4.8),
            ("capital_account", r"\bcapital account\b|\bdistribution amounts?\b", 2.6),
            ("child_support_calc", r"\bchild support\b", 2.8),
            ("quant_membership", r"\bpart of the class\b|\bhistoric stock transactions\b|\bmaximum refund amount\b", 2.4),
            ("distribution_calc", r"\bdetermine\b.{0,35}\bamounts? to be distributed\b", 2.6),
        ],
    },
}
| |
|
| |
|
| | def _build_task_file_index(task_files_root: Path) -> dict[str, str]: |
| | """Build task_id -> concatenated file-name hint string.""" |
| | index: dict[str, str] = {} |
| | if not task_files_root.exists(): |
| | return index |
| |
|
| | for task_dir in sorted(task_files_root.glob("task_*")): |
| | if not task_dir.is_dir(): |
| | continue |
| | file_tokens: list[str] = [] |
| | for path in task_dir.rglob("*"): |
| | if path.is_file(): |
| | file_tokens.append(path.relative_to(task_dir).as_posix()) |
| | index[task_dir.name] = "\n".join(file_tokens) |
| | return index |
| |
|
| |
|
| | def _task_text(task: dict, task_file_hints: str) -> str: |
| | rubric_text = "\n".join(item.get("criteria", "") for item in task.get("rubric", [])) |
| | parts = [ |
| | task.get("prompt", ""), |
| | rubric_text, |
| | task.get("gold_response", ""), |
| | task.get("expected_output", ""), |
| | task_file_hints, |
| | ] |
| | return "\n".join(parts) |
| |
|
| |
|
def _score_task(task: dict, task_file_hints: str) -> tuple[dict[str, float], dict[str, list[str]]]:
    """Score every workflow for one task and record why.

    Returns ``(scores, reasons)`` where ``scores`` maps each workflow in the
    task's domain to a float (prior + matched signal weights + domain
    bonuses) and ``reasons`` maps workflows to the human-readable signal
    names that fired.  Tasks whose domain has no quota table return two
    empty dicts.
    """
    domain = task.get("domain")
    if domain not in DOMAIN_WORKFLOW_QUOTAS:
        return {}, {}

    workflows = list(DOMAIN_WORKFLOW_QUOTAS[domain].keys())
    priors = DOMAIN_BASE_PRIORS[domain]
    signals = DOMAIN_SIGNALS[domain]

    # Lowercased up front; regex matching below also passes re.IGNORECASE,
    # which is redundant with the lowercasing but harmless.
    text = _task_text(task, task_file_hints).lower()
    # Seed every workflow with its base prior so ties are never all-zero.
    scores: dict[str, float] = {workflow: priors.get(workflow, 0.0) for workflow in workflows}
    reasons: DefaultDict[str, list[str]] = defaultdict(list)

    def add(workflow: str, amount: float, reason: str) -> None:
        # Accumulate score and keep an audit trail of which rule fired.
        scores[workflow] += amount
        reasons[workflow].append(f"{reason} (+{amount:.1f})")

    # Generic keyword signals shared by all tasks in this domain.
    for workflow, rules in signals.items():
        for name, pattern, weight in rules:
            if re.search(pattern, text, flags=re.IGNORECASE):
                add(workflow, weight, name)

    expected_output = task.get("expected_output", "")

    # --- Domain-specific bonus rules below: combinations of output type and
    # --- co-occurring phrases that a single keyword list cannot express.

    if domain == "Investment Banking":
        # Spreadsheet deliverables skew toward sensitivity work; decks
        # toward valuation summaries.
        if expected_output in {"make_new_sheet", "edit_existing_sheet"}:
            add("Sensitivity Analysis", 2.4, "sheet_output_sensitivity")
        if expected_output == "make_new_slide_deck":
            add("Valuation Analysis", 1.3, "slide_output_valuation")

        if re.search(r"\blbo\b", text) and re.search(r"\bscenario\b|\bsensitivity\b|\bshock\b|\bflex\b", text):
            add("Sensitivity Analysis", 1.5, "lbo_with_sensitivity")
        if re.search(r"\bdcf\b", text) and re.search(r"\bscenario\b|\bsensitivity\b|\bassum", text):
            add("Sensitivity Analysis", 1.2, "dcf_with_sensitivity")
        if re.search(r"\baccretion dilution model\b|\bmerger model\b", text):
            add("Merger Model", 2.0, "explicit_merger_model")
        if re.search(r"\bprecedent\b|\bpublic comparables?\b", text) and re.search(r"\bmultiple\b", text):
            add("Comparables", 1.4, "comparables_with_multiples")
        # Implied-value language counts toward valuation only when the task
        # is not clearly an LBO or merger model.
        if re.search(r"\bimplied share price\b|\benterprise value\b", text) and not re.search(
            r"\blbo\b|\bmerger model\b", text
        ):
            add("Valuation Analysis", 1.2, "implied_value_focus")
        if re.search(r"\bdebt\b", text) and re.search(r"\bterm loan\b|\brevolver\b|\binterest coverage\b", text):
            add("Debt Model", 1.6, "debt_instrument_focus")

    if domain == "Management Consulting":
        if expected_output in {"make_new_slide_deck", "edit_existing_slide_deck"}:
            add("Benchmarking / Competitive Analysis", 0.9, "slide_output_benchmarking")
        if expected_output == "make_new_doc":
            add("Strategy Recommendations", 1.0, "doc_output_strategy")

        if re.search(r"\bsurvey\b", text) and re.search(r"\brecommend", text):
            add("Strategy Recommendations", 1.1, "survey_with_recommendation")
        if re.search(r"\bpayback period\b|\bone-time investment\b|\bannual savings\b", text):
            add("Cost Benefit Analysis", 2.2, "payback_investment_focus")
        if re.search(r"\bregression\b|\bcorrelat", text):
            add("Operations Analysis", 1.6, "regression_operations_focus")
        if re.search(r"\bgap\b|\bdelta\b|\bversus target\b|\brelative to\b", text):
            add("Variance / Performance Analysis", 1.3, "gap_vs_target")
        if re.search(r"\bscenario\b", text) and re.search(r"\bassum", text):
            add("Scenario/Sensitivity Analysis", 1.4, "scenario_with_assumptions")
        if re.search(r"\btam\b|\bsam\b|\bsom\b", text):
            add("Market Sizing, TAM, SAM", 1.7, "tam_sam_som_bonus")

    if domain == "Law":
        # Document deliverables split by content: litigation terms point to
        # motion drafting, contract terms to contract review, else default
        # weakly to motion drafting.
        if expected_output in {"make_new_doc", "edit_existing_doc"}:
            if re.search(r"\bmotion\b|\bcomplaint\b|\bbrief\b|\bsummary judgment\b", text):
                add("Motion Drafting", 2.0, "doc_output_with_litigation_terms")
            elif re.search(r"\bagreement\b|\bcontract\b|\blease\b|\bmsa\b", text):
                add("Contract Review", 1.3, "doc_output_with_contract_terms")
            else:
                add("Motion Drafting", 0.8, "doc_output_default")

        # Spreadsheet deliverables in Law are the quantitative "Other" bucket.
        if expected_output in {"make_new_sheet", "edit_existing_sheet"}:
            add("Other", 5.0, "sheet_output")

        if re.search(r"\bclass action\b|\bmotion to dismiss\b|\bsummary judgment\b", text):
            add("Litigation Strategy", 1.2, "litigation_posture")

        if re.search(r"\bcheck these (?:four )?faxes\b|\bfor each item\b|\bindicate whether\b", text):
            add("Compliance Program Review", 1.0, "compliance_checklist_style")

        # SEC disclosure context boosts two plausible workflows at once.
        if re.search(r"\b8-k\b|\bform 8-k\b|\bsec\b|\brule 10b-5\b|\breg fd\b", text):
            add("Compliance Program Review", 1.4, "sec_disclosure_context")
            add("Legal Research", 0.8, "sec_disclosure_context")

        if re.search(r"\breview\b.{0,30}\bagreement\b", text) and re.search(r"\bcan\b|\bmay\b|\bvalid\b", text):
            add("Contract Review", 1.1, "agreement_interpretation")

        if re.search(r"\bbreach\b|\boutage\b", text) and re.search(r"\bincident report\b|\bpostmortem\b", text):
            add("Internal Investigations", 1.5, "breach_incident_combo")

        if re.search(
            r"\bcapital account\b|\bamounts? to be distributed\b|\bchild support\b|\bmaximum refund amount\b",
            text,
        ):
            add("Other", 1.8, "quantitative_legal_calculation")

    return scores, reasons
| |
|
| |
|
| | def _quota_assign( |
| | task_ids: list[str], |
| | scores_by_task: dict[str, dict[str, float]], |
| | workflow_quotas: dict[str, int], |
| | domain: str, |
| | ) -> dict[str, str]: |
| | slots: list[str] = [] |
| | for workflow, count in workflow_quotas.items(): |
| | slots.extend([workflow] * count) |
| |
|
| | if len(task_ids) != len(slots): |
| | raise ValueError( |
| | f"{domain} task count ({len(task_ids)}) does not match workflow quota total ({len(slots)})." |
| | ) |
| |
|
| | score_matrix = np.zeros((len(task_ids), len(slots)), dtype=np.float64) |
| | for row_idx, task_id in enumerate(task_ids): |
| | scores = scores_by_task[task_id] |
| | for col_idx, workflow in enumerate(slots): |
| | score_matrix[row_idx, col_idx] = scores[workflow] |
| |
|
| | row_ind, col_ind = linear_sum_assignment(-score_matrix) |
| | assignments: dict[str, str] = {} |
| | for row_idx, col_idx in zip(row_ind, col_ind): |
| | assignments[task_ids[row_idx]] = slots[col_idx] |
| | return assignments |
| |
|
| |
|
| | def _score_rank(scores: dict[str, float], assigned_workflow: str) -> int: |
| | sorted_items = sorted(scores.items(), key=lambda item: item[1], reverse=True) |
| | for idx, (workflow, _) in enumerate(sorted_items, start=1): |
| | if workflow == assigned_workflow: |
| | return idx |
| | return len(sorted_items) |
| |
|
| |
|
| | def _confidence(scores: dict[str, float], assigned_workflow: str) -> float: |
| | vals = sorted(scores.values(), reverse=True) |
| | top = vals[0] |
| | second = vals[1] if len(vals) > 1 else vals[0] |
| | assigned = scores[assigned_workflow] |
| | margin = assigned - second if assigned == top else assigned - top |
| | spread = max(vals) - min(vals) + 1e-6 |
| | scaled = margin / (spread / 2.0 + 1e-6) |
| | return 1.0 / (1.0 + math.exp(-scaled)) |
| |
|
| |
|
def _augment_tasks(tasks: list[dict], task_file_index: dict[str, str]) -> tuple[list[dict], list[dict]]:
    """Attach inferred workflow labels to tasks and build audit rows.

    Returns ``(augmented_tasks, audit_rows)``: copies of the input tasks
    with ``workflow`` / ``workflow_inference`` fields added for tasks whose
    domain has a quota table, plus one diagnostics dict per labeled task
    for the audit CSV.  Tasks in unknown domains pass through unchanged.
    """
    domain_scores: dict[str, dict[str, dict[str, float]]] = {}
    domain_reasons: dict[str, dict[str, dict[str, list[str]]]] = {}
    domain_assignments: dict[str, dict[str, str]] = {}

    # Phase 1: score every task per domain, then solve the quota-constrained
    # assignment once per domain.
    for domain, quotas in DOMAIN_WORKFLOW_QUOTAS.items():
        domain_tasks = [task for task in tasks if task.get("domain") == domain]
        task_ids = [task["task_id"] for task in domain_tasks]

        scores_by_task: dict[str, dict[str, float]] = {}
        reasons_by_task: dict[str, dict[str, list[str]]] = {}
        for task in domain_tasks:
            task_id = task["task_id"]
            file_hints = task_file_index.get(task_id, "")
            scores, reasons = _score_task(task, file_hints)
            scores_by_task[task_id] = scores
            reasons_by_task[task_id] = reasons

        assignments = _quota_assign(task_ids, scores_by_task, quotas, domain)
        domain_scores[domain] = scores_by_task
        domain_reasons[domain] = reasons_by_task
        domain_assignments[domain] = assignments

    augmented: list[dict] = []
    audit_rows: list[dict] = []

    # Phase 2: emit augmented copies in the original task order.  Shallow
    # copies: only top-level keys are added, nested values stay shared.
    for task in tasks:
        task_copy = dict(task)
        domain = task_copy.get("domain")
        if domain in DOMAIN_WORKFLOW_QUOTAS:
            task_id = task_copy["task_id"]
            workflow = domain_assignments[domain][task_id]
            scores = domain_scores[domain][task_id]
            conf = _confidence(scores, workflow)
            rank = _score_rank(scores, workflow)

            task_copy["workflow"] = workflow
            task_copy["workflow_inference"] = {
                "source": "heuristic_quota_constrained_v2_all_domains",
                "confidence": round(conf, 4),
                "assigned_score_rank": rank,
                # Cap at six signals to keep the JSON readable.
                "reason_signals": domain_reasons[domain][task_id].get(workflow, [])[:6],
                "paper_domain_quota_aligned": True,
            }

            # Audit row records the top-3 scoring workflows for review.
            # NOTE(review): indexing sorted_scores[2] assumes every quota
            # table has at least 3 workflows — true for all current domains.
            sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
            audit_rows.append(
                {
                    "domain": domain,
                    "task_id": task_id,
                    "task_name": task_copy.get("task_name", ""),
                    "workflow": workflow,
                    "confidence": f"{conf:.4f}",
                    "assigned_score_rank": rank,
                    "top1_workflow": sorted_scores[0][0],
                    "top1_score": f"{sorted_scores[0][1]:.3f}",
                    "top2_workflow": sorted_scores[1][0],
                    "top2_score": f"{sorted_scores[1][1]:.3f}",
                    "top3_workflow": sorted_scores[2][0],
                    "top3_score": f"{sorted_scores[2][1]:.3f}",
                    "assigned_signals": " | ".join(domain_reasons[domain][task_id].get(workflow, [])[:6]),
                }
            )

        augmented.append(task_copy)

    return augmented, audit_rows
| |
|
| |
|
| | def _write_audit_csv(rows: list[dict], path: Path) -> None: |
| | fieldnames = [ |
| | "domain", |
| | "task_id", |
| | "task_name", |
| | "workflow", |
| | "confidence", |
| | "assigned_score_rank", |
| | "top1_workflow", |
| | "top1_score", |
| | "top2_workflow", |
| | "top2_score", |
| | "top3_workflow", |
| | "top3_score", |
| | "assigned_signals", |
| | ] |
| | with path.open("w", encoding="utf-8", newline="") as fp: |
| | writer = csv.DictWriter(fp, fieldnames=fieldnames) |
| | writer.writeheader() |
| | writer.writerows(rows) |
| |
|
| |
|
| | def parse_args() -> argparse.Namespace: |
| | parser = argparse.ArgumentParser(description=__doc__) |
| | parser.add_argument( |
| | "--input", |
| | type=Path, |
| | default=Path("tasks_and_rubrics.json"), |
| | help="Input task JSON file.", |
| | ) |
| | parser.add_argument( |
| | "--output", |
| | type=Path, |
| | default=Path("tasks_and_rubrics_with_workflow.json"), |
| | help="Output JSON file with inferred `workflow` labels.", |
| | ) |
| | parser.add_argument( |
| | "--task-files-root", |
| | type=Path, |
| | default=Path("task_files"), |
| | help="Root folder containing `task_<id>` subfolders.", |
| | ) |
| | parser.add_argument( |
| | "--audit-output", |
| | type=Path, |
| | default=Path("workflow_inference_audit.csv"), |
| | help="CSV path for per-task assignment diagnostics.", |
| | ) |
| | return parser.parse_args() |
| |
|
| |
|
def main() -> None:
    """CLI entry point: read tasks, infer workflows, write outputs.

    Reads the input JSON (must be a list of task objects), augments each
    task with an inferred workflow label, writes the augmented JSON and an
    audit CSV, then prints the per-domain workflow distribution so it can
    be eyeballed against the quota tables.
    """
    args = parse_args()
    tasks = json.loads(args.input.read_text(encoding="utf-8"))
    if not isinstance(tasks, list):
        raise ValueError("Input JSON must be an array of task objects.")

    task_file_index = _build_task_file_index(args.task_files_root)
    augmented_tasks, audit_rows = _augment_tasks(tasks, task_file_index)

    # ensure_ascii=False keeps non-ASCII task text readable in the output.
    args.output.write_text(json.dumps(augmented_tasks, indent=2, ensure_ascii=False), encoding="utf-8")
    _write_audit_csv(audit_rows, args.audit_output)

    print(f"Wrote {args.output} ({len(augmented_tasks)} tasks)")
    print(f"Wrote {args.audit_output} ({len(audit_rows)} labeled tasks)")
    for domain, quotas in DOMAIN_WORKFLOW_QUOTAS.items():
        # Summarize assigned labels; should match the quota table exactly.
        distribution = Counter(
            task.get("workflow")
            for task in augmented_tasks
            if task.get("domain") == domain
        )
        print(f"Inferred {domain} workflow distribution:")
        for workflow in quotas:
            print(f"  {workflow}: {distribution.get(workflow, 0)}")
| |
|
| |
|
| | if __name__ == "__main__": |
| | main() |
| |
|