Spaces:
Sleeping
Sleeping
Demo A + Demo B
Browse files- data/run_logs.jsonl +5 -0
- demo_guide.docx +0 -0
- framework_demo_v2.py → framework_demo_a.py +4 -2
- framework_demo_b.py +561 -0
- requirements_demo.txt → requirements_demo_a.txt +0 -0
- requirements_demo_b.txt +7 -0
data/run_logs.jsonl
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{"run_id": "run_20260227T075002Z_6e0866a2", "agent_id": "demo_agent", "model_id": "workflow_demo_model", "version_id": "0.1.0", "policy_id": "hitl_and_pricing_policy", "policy_version": "1.0", "task_type": "fintech_credit_risk", "timestamps": {"created_at": "2026-02-27T07:50:02+00:00"}, "decision": "Decision Draft", "result": {"risk_score_pd": 0.32, "requested_amount": 250000.0, "routing_reason": "LOW_RISK", "explainability": {"formula": "risk = 0.6*(debt/income) + 0.4*((850-credit_score)/850)", "intermediates": {"debt_to_income": 0.4, "score_gap": 0.2}, "how_to_read": "Higher risk_score_pd means higher predicted risk (baseline, not calibrated)."}}, "evidence": {"steps": [{"step_id": "step_20260227T075002Z_e0111644", "name": "memory_touch", "started_at": "2026-02-27T07:50:02+00:00", "ended_at": "2026-02-27T07:50:02+00:00", "duration_ms": 25, "inputs": {"thread_id": "demo_thread"}, "outputs": {"thread_state": {"note": "no_state_returned", "thread_id": "demo_thread"}}, "evidence": {"checkpointer": "sqlite"}, "error": null}, {"step_id": "step_20260227T075002Z_14980458", "name": "risk_scoring", "started_at": "2026-02-27T07:50:02+00:00", "ended_at": "2026-02-27T07:50:02+00:00", "duration_ms": 0, "inputs": {"income": 75000.0, "debt": 30000.0, "credit_score": 680}, "outputs": {"risk_score_pd": 0.32, "intermediates": {"debt_to_income": 0.4, "score_gap": 0.2}, "formula": "risk = 0.6*(debt/income) + 0.4*((850-credit_score)/850)"}, "evidence": {}, "error": null}, {"step_id": "step_20260227T075002Z_bde83fbe", "name": "hitl_policy", "started_at": "2026-02-27T07:50:02+00:00", "ended_at": "2026-02-27T07:50:02+00:00", "duration_ms": 0, "inputs": {"risk_score_pd": 0.32, "requested_amount": 250000.0}, "outputs": {"decision": "Decision Draft", "reason": "LOW_RISK", "threshold": 0.5, "band": 0.05}, "evidence": {"policy": {"threshold": 0.5, "border_band": 0.05, "high_impact_amount": 1000000.0}}, "error": null}]}}
|
| 2 |
+
{"run_id": "run_20260227T075133Z_54f884cf", "agent_id": "demo_agent", "model_id": "workflow_demo_model", "version_id": "0.1.0", "policy_id": "hitl_and_pricing_policy", "policy_version": "1.0", "task_type": "te_pricing", "timestamps": {"created_at": "2026-02-27T07:51:33+00:00"}, "decision": "Pricing Draft", "result": {"brand_context": "TE", "pricing": {"presale_price": 41.99, "retail_price": 51.99, "unit_margin_presale": 31.990000000000002, "unit_margin_retail": 41.99, "assumptions": {"alpha": 120.0, "beta": 0.08, "discount": 0.2}, "optimization": {"presale": 42.0, "demand": 4.168231073368628, "objective_profit": 133.3833943477961}, "policy_checks": {"presale_ge_floor": false, "presale_ge_7xcogs": false, "retail_gt_presale": true}, "benchmark_placeholder": [{"category": "smart_plug", "brand": "TP-Link Kasa"}, {"category": "smart_plug", "brand": "Amazon Smart Plug"}, {"category": "in_wall_outlet", "brand": "Leviton Decora Smart"}, {"category": "premium", "brand": "Eve (Matter/Thread)"}], "limitations": "Benchmark list is a placeholder. 
Replace with a curated dataset for evidence."}, "explainability": {"objective": "(price - landed) * demand(price)", "demand_curve": "alpha * exp(-beta * price) (placeholder)", "why_placeholder": "Replace alpha/beta using real presale conversion data."}}, "evidence": {"steps": [{"step_id": "step_20260227T075133Z_a29bd2d9", "name": "memory_touch", "started_at": "2026-02-27T07:51:33+00:00", "ended_at": "2026-02-27T07:51:33+00:00", "duration_ms": 9, "inputs": {"thread_id": "demo_thread"}, "outputs": {"thread_state": {"note": "no_state_returned", "thread_id": "demo_thread"}}, "evidence": {"checkpointer": "sqlite"}, "error": null}, {"step_id": "step_20260227T075133Z_72e0332c", "name": "pricing_optimization", "started_at": "2026-02-27T07:51:33+00:00", "ended_at": "2026-02-27T07:51:33+00:00", "duration_ms": 0, "inputs": {"cogs": 6.0, "landed": 10.0, "presale_mult": 7.0, "discount": 0.2}, "outputs": {"presale_price": 41.99, "retail_price": 51.99, "policy_checks": {"presale_ge_floor": false, "presale_ge_7xcogs": false, "retail_gt_presale": true}}, "evidence": {}, "error": null}], "brand_rule": "TE naming only on this tab."}}
|
| 3 |
+
{"run_id": "run_20260227T082015Z_b59ab55a", "agent_id": "nexdatawork_demo_agent", "model_id": "framework_langgraph_agent", "version_id": "1.0.0", "policy_id": "hitl_and_pricing_policy", "policy_version": "1.0", "llm_model": "gpt-4o-mini", "task_type": "fintech_credit_risk", "timestamps": {"created_at": "2026-02-27T08:20:15+00:00"}, "decision": "Draft", "result": {"raw_text": "### Decision Summary\n- **Decision**: Decision Draft\n- **Reason**: LOW_RISK (The risk score is below the threshold of 0.5)\n\n### Tool JSON Outputs\n1. **fintech_score Output**:\n ```json\n {\n \"risk_score_pd\": 0.32,\n \"intermediates\": {\n \"debt_to_income\": 0.4,\n \"score_gap\": 0.2\n },\n \"formula\": \"risk = 0.6*(debt/income) + 0.4*((850-credit_score)/850)\"\n }\n ```\n\n2. **hitl_route Output**:\n ```json\n {\n \"decision\": \"Decision Draft\",\n \"reason\": \"LOW_RISK\",\n \"threshold\": 0.5,\n \"band\": 0.05\n }\n ```", "error": null}, "evidence": {"steps": [{"step_id": "step_20260227T082015Z_ef940f31", "name": "memory_scope", "started_at": "2026-02-27T08:20:15+00:00", "ended_at": "2026-02-27T08:20:15+00:00", "duration_ms": 0, "inputs": {"thread_id": "demo_thread"}, "outputs": {"checkpointer_kind": "sqlite"}, "evidence": {"note": "Memory is keyed by thread_id via LangGraph checkpointer."}, "error": null}, {"step_id": "step_20260227T082027Z_9b64b7ef", "name": "agent_invoke", "started_at": "2026-02-27T08:20:27+00:00", "ended_at": "2026-02-27T08:20:27+00:00", "duration_ms": 6241, "inputs": {"task_type": "fintech_credit_risk"}, "outputs": {"raw_text_preview": "### Decision Summary\n- **Decision**: Decision Draft\n- **Reason**: LOW_RISK (The risk score is below the threshold of 0.5)\n\n### Tool JSON Outputs\n1. **fintech_score Output**:\n ```json\n {\n \"risk_score_pd\": 0.32,\n \"intermediates\": {\n \"debt_to_income\": 0.4,\n \"score_gap\": 0.2\n },\n \"formula\": \"risk = 0.6*(debt/income) + 0.4*((850-credit_score)/850)\"\n }\n ```\n\n2. 
**hitl_route Output**:\n ```json\n {\n \"decision\": \"Decision Draft\",\n \"reason\": \"LOW_RISK\",\n \"threshold\": 0.5,\n \"band\": 0.05\n }\n ```"}, "evidence": {"llm_model": "gpt-4o-mini"}, "error": null}]}}
|
| 4 |
+
{"run_id": "run_20260227T082059Z_9a7de9cf", "agent_id": "nexdatawork_demo_agent", "model_id": "framework_langgraph_agent", "version_id": "1.0.0", "policy_id": "hitl_and_pricing_policy", "policy_version": "1.0", "llm_model": "gpt-4o-mini", "task_type": "te_pricing", "timestamps": {"created_at": "2026-02-27T08:20:59+00:00"}, "decision": "Draft", "result": {"raw_text": "### Suggested Prices\n- **Presale Price**: $41.99\n- **Retail Price**: $51.99\n\n### Policy Checks Status\n- **Presale >= 7 * COGS**: **False** (The presale price does not meet the required minimum based on COGS)\n- **Retail > Presale**: **True** (The retail price is greater than the presale price)\n\n### Note on Demand Model Accuracy\nTo make the demand model more accurate, we need to replace the placeholder values for alpha and beta with real presale conversion rates or fitted demand model data. This will help in better predicting demand based on pricing strategies.\n\n### Benchmark Information\n- **Placeholder Competitors**:\n - Smart Plug: TP-Link Kasa\n - Smart Plug: Amazon Smart Plug\n - In-Wall Outlet: Leviton Decora Smart\n - Premium: Eve (Matter/Thread)\n\n**Limitations**: The benchmark data is a placeholder and should be verified with real market data.", "error": null}, "evidence": {"steps": [{"step_id": "step_20260227T082059Z_d4dbe67e", "name": "memory_scope", "started_at": "2026-02-27T08:20:59+00:00", "ended_at": "2026-02-27T08:20:59+00:00", "duration_ms": 0, "inputs": {"thread_id": "demo_thread"}, "outputs": {"checkpointer_kind": "sqlite"}, "evidence": {"note": "Memory is keyed by thread_id via LangGraph checkpointer."}, "error": null}, {"step_id": "step_20260227T082106Z_e1ccf5d1", "name": "agent_invoke", "started_at": "2026-02-27T08:21:06+00:00", "ended_at": "2026-02-27T08:21:06+00:00", "duration_ms": 7382, "inputs": {"task_type": "te_pricing"}, "outputs": {"raw_text_preview": "### Suggested Prices\n- **Presale Price**: $41.99\n- **Retail Price**: $51.99\n\n### Policy Checks Status\n- 
**Presale >= 7 * COGS**: **False** (The presale price does not meet the required minimum based on COGS)\n- **Retail > Presale**: **True** (The retail price is greater than the presale price)\n\n### Note on Demand Model Accuracy\nTo make the demand model more accurate, we need to replace the placeholder values for alpha and beta with real presale conversion rates or fitted demand model data. This will help in better predicting demand based on pricing strategies.\n\n### Benchmark Information\n- **Placeholder Competitors**:\n - Smart Plug: TP-Link Kasa\n - Smart Plug: Amazon Smart Plug\n - In-Wall Outlet: Leviton Decora Smart\n - Premium: Eve (Matter/Thread)\n\n**Limitations**: The benchmark data is a placeholder and should be verified with real market data."}, "evidence": {"llm_model": "gpt-4o-mini"}, "error": null}]}}
|
| 5 |
+
{"run_id": "run_20260227T082224Z_6b27b255", "agent_id": "nexdatawork_demo_agent", "model_id": "framework_langgraph_agent", "version_id": "1.0.0", "policy_id": "hitl_and_pricing_policy", "policy_version": "1.0", "llm_model": "gpt-4o-mini", "task_type": "te_pricing", "timestamps": {"created_at": "2026-02-27T08:22:24+00:00"}, "decision": "Draft", "result": {"raw_text": "### Suggested Prices\n- **Presale Price**: $41.99\n- **Retail Price**: $59.99\n\n### Policy Checks Status\n- **Presale >= 7 * COGS**: **False** (The presale price does not meet the required minimum based on COGS)\n- **Retail > Presale**: **True** (The retail price is greater than the presale price)\n\n### Note on Demand Model Accuracy\nTo improve the accuracy of the demand model, we need to replace the placeholder values for alpha and beta with actual presale conversion rates or fitted demand model data. This will enhance our ability to predict demand based on pricing strategies.\n\n### Benchmark Information\n- **Placeholder Competitors**:\n - Smart Plug: TP-Link Kasa\n - Smart Plug: Amazon Smart Plug\n - In-Wall Outlet: Leviton Decora Smart\n - Premium: Eve (Matter/Thread)\n\n**Limitations**: The benchmark data is a placeholder and should be verified with real market data.", "error": null}, "evidence": {"steps": [{"step_id": "step_20260227T082224Z_7ad1d6de", "name": "memory_scope", "started_at": "2026-02-27T08:22:24+00:00", "ended_at": "2026-02-27T08:22:24+00:00", "duration_ms": 0, "inputs": {"thread_id": "demo_thread"}, "outputs": {"checkpointer_kind": "sqlite"}, "evidence": {"note": "Memory is keyed by thread_id via LangGraph checkpointer."}, "error": null}, {"step_id": "step_20260227T082231Z_a15bc842", "name": "agent_invoke", "started_at": "2026-02-27T08:22:31+00:00", "ended_at": "2026-02-27T08:22:31+00:00", "duration_ms": 6833, "inputs": {"task_type": "te_pricing"}, "outputs": {"raw_text_preview": "### Suggested Prices\n- **Presale Price**: $41.99\n- **Retail Price**: $59.99\n\n### Policy Checks 
Status\n- **Presale >= 7 * COGS**: **False** (The presale price does not meet the required minimum based on COGS)\n- **Retail > Presale**: **True** (The retail price is greater than the presale price)\n\n### Note on Demand Model Accuracy\nTo improve the accuracy of the demand model, we need to replace the placeholder values for alpha and beta with actual presale conversion rates or fitted demand model data. This will enhance our ability to predict demand based on pricing strategies.\n\n### Benchmark Information\n- **Placeholder Competitors**:\n - Smart Plug: TP-Link Kasa\n - Smart Plug: Amazon Smart Plug\n - In-Wall Outlet: Leviton Decora Smart\n - Premium: Eve (Matter/Thread)\n\n**Limitations**: The benchmark data is a placeholder and should be verified with real market data."}, "evidence": {"llm_model": "gpt-4o-mini"}, "error": null}]}}
|
demo_guide.docx
CHANGED
|
Binary files a/demo_guide.docx and b/demo_guide.docx differ
|
|
|
framework_demo_v2.py → framework_demo_a.py
RENAMED
|
@@ -524,7 +524,7 @@ def build_gradio_app():
|
|
| 524 |
def view_logs(n):
|
| 525 |
return json.dumps(LOGGER.tail(int(n)), indent=2)
|
| 526 |
|
| 527 |
-
with gr.Blocks(title="
|
| 528 |
gr.Markdown(
|
| 529 |
"## Framework Demo (LangGraph + Traceable)\n"
|
| 530 |
"- FinTech tab: single-case credit risk + HITL routing\n"
|
|
@@ -585,8 +585,10 @@ def build_gradio_app():
|
|
| 585 |
|
| 586 |
def main():
|
| 587 |
demo = build_gradio_app()
|
| 588 |
-
demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
|
| 589 |
|
| 590 |
|
| 591 |
if __name__ == "__main__":
|
| 592 |
main()
|
|
|
|
|
|
|
|
|
| 524 |
def view_logs(n):
|
| 525 |
return json.dumps(LOGGER.tail(int(n)), indent=2)
|
| 526 |
|
| 527 |
+
with gr.Blocks(title="Demo A") as demo:
|
| 528 |
gr.Markdown(
|
| 529 |
"## Framework Demo (LangGraph + Traceable)\n"
|
| 530 |
"- FinTech tab: single-case credit risk + HITL routing\n"
|
|
|
|
| 585 |
|
| 586 |
def main():
|
| 587 |
demo = build_gradio_app()
|
| 588 |
+
demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860"), shared=True))
|
| 589 |
|
| 590 |
|
| 591 |
if __name__ == "__main__":
|
| 592 |
main()
|
| 593 |
+
|
| 594 |
+
# http://127.0.0.1:7860/
|
framework_demo_b.py
ADDED
|
@@ -0,0 +1,561 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
LangGraph + LangChain Framework Demo (Stateful + Traceable) with OpenAI
|
| 3 |
+
|
| 4 |
+
This version is closer to LangChain/LangGraph "agent + memory" patterns:
|
| 5 |
+
- LangGraph ReAct agent (tool-calling) with short-term memory via checkpointer (thread_id)
|
| 6 |
+
- Traceable run logs: run_id, model_id, version_id, policy_id, step traces, evidence
|
| 7 |
+
- Two business workflows:
|
| 8 |
+
1) FinTech credit risk demo (single case) + HITL policy routing
|
| 9 |
+
2) TE consumer product pricing (presale + retail) + constraint checks + benchmark draft (LLM, no web)
|
| 10 |
+
|
| 11 |
+
Gradio:
|
| 12 |
+
- Tab 1: FinTech (form -> agent decides tools -> structured output + explanation)
|
| 13 |
+
- Tab 2: TE Pricing (form -> agent decides tools -> structured output + explanation)
|
| 14 |
+
- Tab 3: Logs (tail)
|
| 15 |
+
|
| 16 |
+
Hugging Face:
|
| 17 |
+
- Rename this file to app.py
|
| 18 |
+
- Use requirements_langgraph.txt as requirements.txt
|
| 19 |
+
- Add OPENAI_API_KEY in Space secrets
|
| 20 |
+
|
| 21 |
+
Security:
|
| 22 |
+
- This file redacts API keys from logs and UI outputs.
|
| 23 |
+
|
| 24 |
+
Note:
|
| 25 |
+
- "Benchmark research" here is AI-generated only. No browsing. Treat as draft until verified.
|
| 26 |
+
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
from __future__ import annotations
|
| 30 |
+
|
| 31 |
+
import json
|
| 32 |
+
import os
|
| 33 |
+
import re
|
| 34 |
+
import time
|
| 35 |
+
import uuid
|
| 36 |
+
import sqlite3
|
| 37 |
+
from dataclasses import asdict, dataclass, field
|
| 38 |
+
from datetime import datetime, timezone
|
| 39 |
+
from pathlib import Path
|
| 40 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# =========================
|
| 44 |
+
# IDs + versions (traceable)
|
| 45 |
+
# =========================
|
| 46 |
+
|
| 47 |
+
AGENT_ID = os.getenv("AGENT_ID", "nexdatawork_demo_agent")
|
| 48 |
+
MODEL_ID = os.getenv("MODEL_ID", "framework_langgraph_agent")
|
| 49 |
+
VERSION_ID = os.getenv("VERSION_ID", "1.0.0")
|
| 50 |
+
|
| 51 |
+
POLICY_ID = os.getenv("POLICY_ID", "hitl_and_pricing_policy")
|
| 52 |
+
POLICY_VERSION = os.getenv("POLICY_VERSION", "1.0")
|
| 53 |
+
|
| 54 |
+
LLM_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
|
| 55 |
+
|
| 56 |
+
# Artifacts for HF Spaces
|
| 57 |
+
DATA_DIR = Path(os.getenv("DATA_DIR", "./data"))
|
| 58 |
+
DATA_DIR.mkdir(parents=True, exist_ok=True)
|
| 59 |
+
RUN_LOG_PATH = DATA_DIR / os.getenv("RUN_LOG_PATH", "run_logs.jsonl")
|
| 60 |
+
CHECKPOINT_PATH = DATA_DIR / os.getenv("CHECKPOINT_PATH", "checkpoints.sqlite")
|
| 61 |
+
|
| 62 |
+
# FinTech policy knobs
|
| 63 |
+
RISK_THRESHOLD = float(os.getenv("RISK_THRESHOLD", "0.50"))
|
| 64 |
+
BORDER_BAND = float(os.getenv("BORDER_BAND", "0.05"))
|
| 65 |
+
HIGH_IMPACT_AMOUNT = float(os.getenv("HIGH_IMPACT_AMOUNT", "1000000"))
|
| 66 |
+
|
| 67 |
+
# TE pricing defaults
|
| 68 |
+
DEFAULT_COGS = float(os.getenv("DEFAULT_COGS", "6"))
|
| 69 |
+
DEFAULT_LANDED = float(os.getenv("DEFAULT_LANDED", "10"))
|
| 70 |
+
DEFAULT_PRESALE_MULT = float(os.getenv("DEFAULT_PRESALE_MULT", "7"))
|
| 71 |
+
DEFAULT_PRESALE_DISCOUNT = float(os.getenv("DEFAULT_PRESALE_DISCOUNT", "0.20"))
|
| 72 |
+
DEFAULT_PRICE_GRID = [x for x in range(42, 121, 1)] # 42..120 (demo grid)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# =========================
|
| 76 |
+
# Helpers
|
| 77 |
+
# =========================
|
| 78 |
+
|
| 79 |
+
def utc_now() -> str:
|
| 80 |
+
return datetime.now(timezone.utc).replace(microsecond=0).isoformat()
|
| 81 |
+
|
| 82 |
+
def new_id(prefix: str) -> str:
|
| 83 |
+
ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
|
| 84 |
+
return f"{prefix}_{ts}_{uuid.uuid4().hex[:8]}"
|
| 85 |
+
|
| 86 |
+
def run_metadata(task_type: str) -> Dict[str, Any]:
|
| 87 |
+
return {
|
| 88 |
+
"run_id": new_id("run"),
|
| 89 |
+
"agent_id": AGENT_ID,
|
| 90 |
+
"model_id": MODEL_ID,
|
| 91 |
+
"version_id": VERSION_ID,
|
| 92 |
+
"policy_id": POLICY_ID,
|
| 93 |
+
"policy_version": POLICY_VERSION,
|
| 94 |
+
"llm_model": LLM_MODEL,
|
| 95 |
+
"task_type": task_type,
|
| 96 |
+
"timestamps": {"created_at": utc_now()},
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# =========================
|
| 101 |
+
# Redaction (API key safety)
|
| 102 |
+
# =========================
|
| 103 |
+
|
| 104 |
+
_API_KEY_PATTERN = re.compile(r"sk-[A-Za-z0-9_\-]{20,}")
|
| 105 |
+
|
| 106 |
+
def redact_text(s: str) -> str:
|
| 107 |
+
if not isinstance(s, str):
|
| 108 |
+
return s
|
| 109 |
+
return _API_KEY_PATTERN.sub("sk-REDACTED", s)
|
| 110 |
+
|
| 111 |
+
def redact(obj: Any) -> Any:
|
| 112 |
+
if isinstance(obj, str):
|
| 113 |
+
return redact_text(obj)
|
| 114 |
+
if isinstance(obj, list):
|
| 115 |
+
return [redact(x) for x in obj]
|
| 116 |
+
if isinstance(obj, dict):
|
| 117 |
+
return {k: redact(v) for k, v in obj.items()}
|
| 118 |
+
return obj
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
# =========================
|
| 122 |
+
# Traceable logs
|
| 123 |
+
# =========================
|
| 124 |
+
|
| 125 |
+
@dataclass
|
| 126 |
+
class StepTrace:
|
| 127 |
+
step_id: str
|
| 128 |
+
name: str
|
| 129 |
+
started_at: str
|
| 130 |
+
ended_at: str
|
| 131 |
+
duration_ms: int
|
| 132 |
+
inputs: Dict[str, Any] = field(default_factory=dict)
|
| 133 |
+
outputs: Dict[str, Any] = field(default_factory=dict)
|
| 134 |
+
evidence: Dict[str, Any] = field(default_factory=dict)
|
| 135 |
+
error: Optional[str] = None
|
| 136 |
+
|
| 137 |
+
class TraceLogger:
|
| 138 |
+
def __init__(self, path: Path):
|
| 139 |
+
self.path = path
|
| 140 |
+
self.path.parent.mkdir(parents=True, exist_ok=True)
|
| 141 |
+
|
| 142 |
+
def log(self, payload: Dict[str, Any]) -> None:
|
| 143 |
+
payload = redact(payload)
|
| 144 |
+
with self.path.open("a", encoding="utf-8") as f:
|
| 145 |
+
f.write(json.dumps(payload, ensure_ascii=False) + "\n")
|
| 146 |
+
|
| 147 |
+
def tail(self, n: int = 30) -> List[Dict[str, Any]]:
|
| 148 |
+
if not self.path.exists():
|
| 149 |
+
return []
|
| 150 |
+
lines = self.path.read_text(encoding="utf-8").splitlines()
|
| 151 |
+
out: List[Dict[str, Any]] = []
|
| 152 |
+
for ln in lines[-n:]:
|
| 153 |
+
try:
|
| 154 |
+
out.append(json.loads(ln))
|
| 155 |
+
except Exception:
|
| 156 |
+
continue
|
| 157 |
+
return out
|
| 158 |
+
|
| 159 |
+
LOGGER = TraceLogger(RUN_LOG_PATH)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
# =========================
|
| 163 |
+
# Deterministic "model tools" (evidence-friendly)
|
| 164 |
+
# =========================
|
| 165 |
+
|
| 166 |
+
def fintech_score_tool(income: float, debt: float, credit_score: int) -> Dict[str, Any]:
|
| 167 |
+
"""
|
| 168 |
+
Baseline, interpretable risk score:
|
| 169 |
+
risk = 0.6*(debt/income) + 0.4*((850-credit_score)/850)
|
| 170 |
+
"""
|
| 171 |
+
if income <= 0:
|
| 172 |
+
raise ValueError("income must be > 0")
|
| 173 |
+
if debt < 0:
|
| 174 |
+
raise ValueError("debt must be >= 0")
|
| 175 |
+
if not (300 <= credit_score <= 850):
|
| 176 |
+
raise ValueError("credit_score must be between 300 and 850")
|
| 177 |
+
|
| 178 |
+
dti = debt / income
|
| 179 |
+
gap = (850 - credit_score) / 850
|
| 180 |
+
risk = (0.6 * dti) + (0.4 * gap)
|
| 181 |
+
|
| 182 |
+
return {
|
| 183 |
+
"risk_score_pd": float(risk),
|
| 184 |
+
"intermediates": {"debt_to_income": float(dti), "score_gap": float(gap)},
|
| 185 |
+
"formula": "risk = 0.6*(debt/income) + 0.4*((850-credit_score)/850)",
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
def hitl_policy(risk_score_pd: float, requested_amount: float) -> Dict[str, Any]:
|
| 189 |
+
"""
|
| 190 |
+
HITL gating:
|
| 191 |
+
- High impact -> review
|
| 192 |
+
- High risk -> review
|
| 193 |
+
- Borderline -> review
|
| 194 |
+
- Else -> draft
|
| 195 |
+
"""
|
| 196 |
+
thr = RISK_THRESHOLD
|
| 197 |
+
band = BORDER_BAND
|
| 198 |
+
hi = requested_amount >= HIGH_IMPACT_AMOUNT
|
| 199 |
+
|
| 200 |
+
if hi:
|
| 201 |
+
return {"decision": "Needs Human Review", "reason": "HIGH_IMPACT_CASE", "threshold": thr, "band": band}
|
| 202 |
+
if risk_score_pd >= (thr + band):
|
| 203 |
+
return {"decision": "Needs Human Review", "reason": "HIGH_RISK", "threshold": thr, "band": band}
|
| 204 |
+
if (thr - band) <= risk_score_pd < (thr + band):
|
| 205 |
+
return {"decision": "Needs Human Review", "reason": "BORDERLINE_SCORE", "threshold": thr, "band": band}
|
| 206 |
+
return {"decision": "Decision Draft", "reason": "LOW_RISK", "threshold": thr, "band": band}
|
| 207 |
+
|
| 208 |
+
def te_pricing_tool(
|
| 209 |
+
cogs: float,
|
| 210 |
+
landed: float,
|
| 211 |
+
presale_mult: float,
|
| 212 |
+
discount: float,
|
| 213 |
+
alpha: float = 120.0,
|
| 214 |
+
beta: float = 0.08,
|
| 215 |
+
) -> Dict[str, Any]:
|
| 216 |
+
"""
|
| 217 |
+
Consumer product pricing demo (predictive modeling placeholder):
|
| 218 |
+
- Demand curve placeholder: demand = alpha * exp(-beta * price)
|
| 219 |
+
- Objective: maximize (price - landed) * demand across a grid
|
| 220 |
+
- Constraint: presale >= presale_mult * cogs, and presale >= 7*cogs (company rule)
|
| 221 |
+
- Retail: retail = presale / (1 - discount), retail > presale
|
| 222 |
+
"""
|
| 223 |
+
if cogs <= 0:
|
| 224 |
+
raise ValueError("cogs must be > 0")
|
| 225 |
+
if landed <= 0:
|
| 226 |
+
raise ValueError("landed must be > 0")
|
| 227 |
+
if presale_mult < 1:
|
| 228 |
+
raise ValueError("presale_mult must be >= 1")
|
| 229 |
+
if not (0.0 < discount < 0.9):
|
| 230 |
+
raise ValueError("discount must be in (0, 0.9)")
|
| 231 |
+
|
| 232 |
+
floor = presale_mult * cogs
|
| 233 |
+
grid = [p for p in DEFAULT_PRICE_GRID if p >= floor]
|
| 234 |
+
|
| 235 |
+
# exp without extra deps
|
| 236 |
+
def exp(x: float) -> float:
|
| 237 |
+
return float((2.718281828459045) ** x)
|
| 238 |
+
|
| 239 |
+
best = None
|
| 240 |
+
for p in grid:
|
| 241 |
+
demand = float(alpha * exp(-beta * p))
|
| 242 |
+
profit = (p - landed) * demand
|
| 243 |
+
if best is None or profit > best["objective_profit"]:
|
| 244 |
+
best = {"presale": float(p), "demand": float(demand), "objective_profit": float(profit)}
|
| 245 |
+
|
| 246 |
+
presale = float(best["presale"]) if best else float(floor)
|
| 247 |
+
retail = presale / (1.0 - discount)
|
| 248 |
+
|
| 249 |
+
# round to .99
|
| 250 |
+
def as_99(x: float) -> float:
|
| 251 |
+
v = round(x)
|
| 252 |
+
return float(f"{max(v, 1) - 0.01:.2f}")
|
| 253 |
+
|
| 254 |
+
presale = as_99(presale)
|
| 255 |
+
retail = as_99(retail)
|
| 256 |
+
|
| 257 |
+
checks = {
|
| 258 |
+
"presale_ge_floor": bool(presale >= floor),
|
| 259 |
+
"presale_ge_7xcogs": bool(presale >= 7.0 * cogs),
|
| 260 |
+
"retail_gt_presale": bool(retail > presale),
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
return {
|
| 264 |
+
"inputs": {"cogs": cogs, "landed": landed, "presale_mult": presale_mult, "discount": discount, "alpha": alpha, "beta": beta},
|
| 265 |
+
"presale_price": presale,
|
| 266 |
+
"retail_price": retail,
|
| 267 |
+
"unit_margin_presale": presale - landed,
|
| 268 |
+
"unit_margin_retail": retail - landed,
|
| 269 |
+
"optimization": best,
|
| 270 |
+
"policy_checks": checks,
|
| 271 |
+
"demand_model": "alpha * exp(-beta * price) (placeholder)",
|
| 272 |
+
"notes": "Replace alpha/beta with real presale conversion or fitted demand model.",
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
def te_benchmark_placeholder() -> Dict[str, Any]:
|
| 276 |
+
"""
|
| 277 |
+
No browsing in this demo. Provide a safe placeholder list.
|
| 278 |
+
The OpenAI agent can draft an unverified benchmark list (marked as draft).
|
| 279 |
+
"""
|
| 280 |
+
return {
|
| 281 |
+
"benchmark_items": [
|
| 282 |
+
{"category": "smart_plug", "brand": "TP-Link Kasa"},
|
| 283 |
+
{"category": "smart_plug", "brand": "Amazon Smart Plug"},
|
| 284 |
+
{"category": "in_wall_outlet", "brand": "Leviton Decora Smart"},
|
| 285 |
+
{"category": "premium", "brand": "Eve (Matter/Thread)"},
|
| 286 |
+
],
|
| 287 |
+
"limitations": "Placeholder only. Verify with real market data.",
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# =========================
|
| 292 |
+
# LangChain tools (for ReAct agent)
|
| 293 |
+
# =========================
|
| 294 |
+
|
| 295 |
+
def build_tools():
|
| 296 |
+
from langchain_core.tools import tool
|
| 297 |
+
|
| 298 |
+
@tool("fintech_score")
|
| 299 |
+
def fintech_score(income: float, debt: float, credit_score: int) -> str:
|
| 300 |
+
"""Compute a baseline risk score (PD) with intermediates. Returns JSON string."""
|
| 301 |
+
out = fintech_score_tool(income, debt, credit_score)
|
| 302 |
+
return json.dumps(out)
|
| 303 |
+
|
| 304 |
+
@tool("hitl_route")
|
| 305 |
+
def hitl_route(score_pd: float, requested_amount: float) -> str:
|
| 306 |
+
"""Apply HITL policy routing. Returns JSON string."""
|
| 307 |
+
out = hitl_policy(score_pd, requested_amount)
|
| 308 |
+
return json.dumps(out)
|
| 309 |
+
|
| 310 |
+
@tool("te_pricing")
|
| 311 |
+
def te_pricing(cogs: float, landed: float, presale_mult: float, discount: float) -> str:
|
| 312 |
+
"""Compute presale + retail pricing under constraints. Returns JSON string."""
|
| 313 |
+
out = te_pricing_tool(cogs, landed, presale_mult, discount)
|
| 314 |
+
return json.dumps(out)
|
| 315 |
+
|
| 316 |
+
@tool("te_benchmark_placeholder")
|
| 317 |
+
def te_benchmark() -> str:
|
| 318 |
+
"""Return a placeholder competitor benchmark list. Returns JSON string."""
|
| 319 |
+
return json.dumps(te_benchmark_placeholder())
|
| 320 |
+
|
| 321 |
+
return [fintech_score, hitl_route, te_pricing, te_benchmark]
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
# =========================
|
| 325 |
+
# LangGraph agent with memory (thread_id)
|
| 326 |
+
# =========================
|
| 327 |
+
|
| 328 |
+
def build_checkpointer():
|
| 329 |
+
"""
|
| 330 |
+
Use SQLite checkpointer if available; otherwise memory.
|
| 331 |
+
We use SqliteSaver(conn) to avoid context-manager issues.
|
| 332 |
+
"""
|
| 333 |
+
try:
|
| 334 |
+
from langgraph.checkpoint.sqlite import SqliteSaver
|
| 335 |
+
conn = sqlite3.connect(str(CHECKPOINT_PATH), check_same_thread=False)
|
| 336 |
+
return SqliteSaver(conn), "sqlite"
|
| 337 |
+
except Exception:
|
| 338 |
+
from langgraph.checkpoint.memory import InMemorySaver
|
| 339 |
+
return InMemorySaver(), "memory"
|
| 340 |
+
|
| 341 |
+
CHECKPOINTER, CHECKPOINTER_KIND = build_checkpointer()
|
| 342 |
+
|
| 343 |
+
def build_agent():
    """
    Create a tool-calling ReAct agent with memory.

    Follows the LangGraph "add memory" pattern: the agent is compiled with a
    checkpointer, and conversation state is scoped by the thread_id supplied
    in the invoke config.

    Returns:
        A runnable agent accepting {"messages": [...]} inputs.
    """
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model=LLM_MODEL, temperature=0)
    tools = build_tools()

    # Compatibility: create_react_agent moved across versions. Only a missing
    # module should trigger the fallback — the previous broad
    # `except Exception` also wrapped agent construction, so real errors
    # (bad model name, malformed tools) were swallowed and silently rerouted
    # to an incompatible code path.
    try:
        from langgraph.prebuilt import create_react_agent  # older path
    except ImportError:
        # Newer versions may not have prebuilt; fall back to langchain.agents.
        # NOTE(review): this fallback takes no checkpointer, so thread_id
        # memory is lost on this path; langchain's create_react_agent may also
        # require a `prompt` argument — confirm against the pinned langchain
        # version before relying on it.
        from langchain.agents import create_react_agent as lc_create_react_agent
        return lc_create_react_agent(llm, tools)

    return create_react_agent(llm, tools, checkpointer=CHECKPOINTER)
|
| 364 |
+
|
| 365 |
+
AGENT = None  # lazily-built process-wide agent; see get_agent()

def get_agent():
    """Return the shared agent instance, building it on first use."""
    global AGENT
    if AGENT is not None:
        return AGENT
    AGENT = build_agent()
    return AGENT
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
# =========================
|
| 375 |
+
# Agent runner (traceable wrapper)
|
| 376 |
+
# =========================
|
| 377 |
+
|
| 378 |
+
def invoke_agent(thread_id: str, task_type: str, user_prompt: str) -> Dict[str, Any]:
    """
    Run the agent under a thread_id and return a traceable, redacted payload.

    The payload bundles run metadata, a decision label, the (redacted) raw
    agent answer, and per-step evidence; it is appended to the run log
    before being returned.
    """
    meta = run_metadata(task_type)

    # Evidence step 1: record which memory scope / checkpointer backs this
    # run. The memory itself lives in the agent's checkpointer, keyed by
    # thread_id — here we only record the scope as evidence.
    scope_step = StepTrace(
        step_id=new_id("step"),
        name="memory_scope",
        started_at=utc_now(),
        ended_at=utc_now(),
        duration_ms=0,
        inputs={"thread_id": thread_id},
        outputs={"checkpointer_kind": CHECKPOINTER_KIND},
        evidence={"note": "Memory is keyed by thread_id via LangGraph checkpointer."},
    )

    agent = get_agent()

    started = time.time()
    failure = None
    answer = ""
    try:
        run_config = {"configurable": {"thread_id": thread_id}}
        reply = agent.invoke({"messages": [{"role": "user", "content": user_prompt}]}, config=run_config)
        if reply and "messages" in reply:
            answer = reply["messages"][-1].content
        answer = redact_text(answer)
    except Exception as exc:  # boundary: surface the error in the payload, never crash the UI
        failure = redact_text(str(exc))

    # Evidence step 2: the agent call itself, with timing and a bounded
    # preview of the (already redacted) answer text.
    invoke_step = StepTrace(
        step_id=new_id("step"),
        name="agent_invoke",
        started_at=utc_now(),
        ended_at=utc_now(),
        duration_ms=int((time.time() - started) * 1000),
        inputs={"task_type": task_type},
        outputs={"raw_text_preview": answer[:2000]},
        error=failure,
        evidence={"llm_model": LLM_MODEL},
    )

    payload = {
        **meta,
        "decision": "Needs Human Review" if failure else "Draft",
        "result": {"raw_text": answer, "error": failure},
        "evidence": {"steps": [asdict(s) for s in (scope_step, invoke_step)]},
    }

    LOGGER.log(payload)
    return redact(payload)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
# =========================
|
| 435 |
+
# Prompt templates (keep it simple + tool-focused)
|
| 436 |
+
# =========================
|
| 437 |
+
|
| 438 |
+
def fintech_prompt(income: float, debt: float, credit_score: int, requested_amount: float) -> str:
    """Render the user prompt for the FinTech credit-risk demo run."""
    lines = [
        "You are running the FinTech credit risk demo.",
        "Use tools in this order:",
        "1) fintech_score(income, debt, credit_score)",
        "2) hitl_route(score_pd, requested_amount)",
        "",
        "Then return:",
        "- A short decision summary (Decision Draft vs Needs Human Review) and the reason.",
        "- Include the tool JSON outputs in the response (copy them).",
        "Inputs:",
        f"income={income}",
        f"debt={debt}",
        f"credit_score={credit_score}",
        f"requested_amount={requested_amount}",
    ]
    return "\n".join(lines)
|
| 454 |
+
|
| 455 |
+
def te_pricing_prompt(cogs: float, landed: float, mult: float, discount: float) -> str:
    """Render the user prompt for the TE pricing demo run."""
    lines = [
        "You are running the TE pricing demo (consumer product use case).",
        "Constraints:",
        "- presale >= 7 * COGS",
        "- retail > presale",
        "Use tools:",
        "1) te_pricing(cogs, landed, presale_mult, discount)",
        "2) te_benchmark_placeholder()",
        "",
        "Then return:",
        "- Suggested presale and retail prices (from tool output)",
        "- Policy checks status",
        "- A short note on what data we need to make the demand model more accurate",
        "Inputs:",
        f"cogs={cogs}",
        f"landed={landed}",
        f"presale_mult={mult}",
        f"discount={discount}",
    ]
    return "\n".join(lines)
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
# =========================
|
| 478 |
+
# Gradio UI
|
| 479 |
+
# =========================
|
| 480 |
+
|
| 481 |
+
def build_gradio_app():
    """Build the Gradio Blocks UI: two agent demo tabs plus a trace-log viewer."""
    import gradio as gr

    # --- Button callbacks: each returns (run_id, pretty-printed payload JSON) ---

    def run_fintech(income, debt, credit_score, requested_amount, thread_id):
        # Coerce UI widget values to the numeric types the prompt expects.
        prompt = fintech_prompt(float(income), float(debt), int(credit_score), float(requested_amount))
        payload = invoke_agent(thread_id=str(thread_id), task_type="fintech_credit_risk", user_prompt=prompt)
        return payload["run_id"], json.dumps(payload, indent=2)

    def run_te(cogs, landed, mult, discount, thread_id):
        prompt = te_pricing_prompt(float(cogs), float(landed), float(mult), float(discount))
        payload = invoke_agent(thread_id=str(thread_id), task_type="te_pricing", user_prompt=prompt)
        return payload["run_id"], json.dumps(payload, indent=2)

    def view_logs(n):
        # Show the last n runs from the run log as a JSON list.
        return json.dumps(LOGGER.tail(int(n)), indent=2)

    with gr.Blocks(title="Demo B") as demo:
        gr.Markdown(
            "## LangGraph Memory + Traceable Demo\n"
            "This demo shows a LangGraph/LangChain agent with memory (thread_id) and traceable run logs.\n"
            f"- Checkpointer: **{CHECKPOINTER_KIND}**\n"
        )

        # thread_id is shared by every tab: it is the agent's memory scope.
        with gr.Row():
            thread_id = gr.Textbox(value="demo_thread", label="thread_id (memory scope)")
            gr.Markdown(f"Logs: `{RUN_LOG_PATH}` \nCheckpoints: `{CHECKPOINT_PATH}`")

        with gr.Tabs():
            with gr.Tab("FinTech: Credit Risk Demo"):
                gr.Markdown("Simple form. Agent calls tools and returns a traceable payload.")
                with gr.Row():
                    income = gr.Number(value=75000, label="Income (annual)")
                    debt = gr.Number(value=30000, label="Debt (total)")
                    credit_score = gr.Number(value=680, label="Credit score (300-850)")
                    requested_amount = gr.Number(value=250000, label="Requested amount")
                btn = gr.Button("Run FinTech agent")
                out_run = gr.Textbox(label="run_id")
                out_json = gr.Textbox(label="traceable output JSON", lines=22)
                btn.click(fn=run_fintech, inputs=[income, debt, credit_score, requested_amount, thread_id], outputs=[out_run, out_json])

            with gr.Tab("TE: Pricing Demo"):
                gr.Markdown(
                    "Consumer product use case. Agent computes presale + retail under constraints and shows benchmark placeholder.\n"
                    "For a real benchmark, replace placeholder with curated market dataset (or verified research)."
                )
                with gr.Row():
                    cogs = gr.Number(value=DEFAULT_COGS, label="COGS per unit")
                    landed = gr.Number(value=DEFAULT_LANDED, label="Landed cost per unit")
                    mult = gr.Number(value=DEFAULT_PRESALE_MULT, label="Presale floor multiplier (>=7)")
                    discount = gr.Slider(0.10, 0.40, value=DEFAULT_PRESALE_DISCOUNT, step=0.05, label="Presale discount vs retail")
                btn2 = gr.Button("Run TE pricing agent")
                out_run2 = gr.Textbox(label="run_id")
                out_json2 = gr.Textbox(label="traceable output JSON", lines=22)
                btn2.click(fn=run_te, inputs=[cogs, landed, mult, discount, thread_id], outputs=[out_run2, out_json2])

            with gr.Tab("Trace Logs"):
                n = gr.Slider(10, 200, value=30, step=10, label="show last N runs")
                btn3 = gr.Button("Refresh logs")
                logs_out = gr.Textbox(lines=24, label="logs (JSON list)")
                btn3.click(fn=view_logs, inputs=[n], outputs=[logs_out])

        gr.Markdown(
            "### Hugging Face deploy\n"
            "1) Rename this file to `app.py`\n"
            "2) Use the provided requirements file as `requirements.txt`\n"
            "3) Add `OPENAI_API_KEY` in Space Secrets\n\n"
            "Note: I can't provide a Hugging Face account for org access. Use your own HF username and ask to be added."
        )

    return demo
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
def main():
    """Launch the Gradio app on all interfaces, honoring $PORT (default 7860)."""
    port = int(os.getenv("PORT", "7860"))
    app = build_gradio_app()
    app.launch(server_name="0.0.0.0", server_port=port)
|
| 556 |
+
|
| 557 |
+
|
| 558 |
+
# Script entry point (run directly, e.g. as app.py on Hugging Face Spaces).
if __name__ == "__main__":
    main()
|
| 560 |
+
|
| 561 |
+
# http://localhost:7860
|
requirements_demo.txt → requirements_demo_a.txt
RENAMED
|
File without changes
|
requirements_demo_b.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.0.0
|
| 2 |
+
langgraph>=1.0.0
|
| 3 |
+
langgraph-checkpoint>=1.0.0
|
| 4 |
+
langgraph-checkpoint-sqlite>=1.0.0
|
| 5 |
+
langchain-core>=0.2.0
|
| 6 |
+
langchain-openai>=0.1.0
|
| 7 |
+
openai>=1.0.0
|