| import gradio as gr |
| import requests |
| import os |
|
|
| |
# Base URL of the Kernl backend, read from the environment. A trailing
# slash is stripped so endpoint paths can be appended with a single "/".
# Unset or empty resolves to None, which query_kernl treats as
# "not configured".
BACKEND_URL = os.environ.get("KERNL_BACKEND_URL", "").rstrip("/") or None
|
|
def query_kernl(scenario, with_brain):
    """Call the Kernl /agent/handle endpoint and format the response.

    Args:
        scenario: Free-text business scenario typed by the user.
        with_brain: When True the backend uses the compiled company
            knowledge ("brain"); when False it returns a generic AI answer.

    Returns:
        A Markdown string with the agent's structured answer, or a
        human-readable error message. Never raises.
    """
    # Validate the user's input first so an empty prompt always gets the
    # helpful message — previously this check was unreachable whenever the
    # backend URL was missing.
    if not scenario or not scenario.strip():
        return "β Please enter a scenario."

    if not BACKEND_URL:
        return "β Backend URL not configured. Please set the KERNL_BACKEND_URL secret."

    try:
        response = requests.post(
            f"{BACKEND_URL}/agent/handle",
            json={
                "company_id": "rivanly-inc",
                "scenario": scenario,
                "with_brain": with_brain,
            },
            timeout=30,
        )
        response.raise_for_status()
        data = response.json()

        # Render the structured fields as Markdown, falling back to
        # 'N/A' for anything the backend omitted.
        output = [
            f"**Action:** `{data.get('action', 'N/A')}`",
            f"**Rule Applied:** `{data.get('rule_applied', 'N/A')}`",
            # Prefer the customer-facing message; fall back to a bare answer.
            f"**Message:** {data.get('message_to_customer', data.get('answer', 'N/A'))}",
        ]
        evidence = data.get('evidence')  # look up once instead of twice
        if evidence:
            output.append(f"**Evidence:** {evidence}")
        output.append(f"**Skill Matched:** `{data.get('skill_matched', 'N/A')}`")
        output.append(f"**Confidence:** `{data.get('confidence', 'N/A')}`")

        return "\n\n".join(output)

    except requests.exceptions.ConnectionError:
        return "β Cannot connect to Kernl backend."
    except requests.exceptions.Timeout:
        return "β Request timed out."
    except Exception as e:
        # Last-resort guard: surface anything unexpected (bad JSON, HTTP
        # error status, non-dict payload) to the UI instead of crashing it.
        return f"β Error: {str(e)}"
|
|
| |
# Visual theme for the demo: teal accents over a neutral gray base,
# rendered in the Inter typeface via Google Fonts.
theme = gr.themes.Soft(
    font=gr.themes.GoogleFont("Inter"),
    primary_hue="teal",
    secondary_hue="teal",
    neutral_hue="gray",
)
|
|
# Page layout: header, an input column (scenario box + brain toggle +
# submit button), an output column, example scenarios, and the click
# wiring that connects the button to query_kernl.
with gr.Blocks(theme=theme, title="Kernl β Operational Memory for AI Agents") as demo:
    # Header / tagline.
    gr.Markdown("""
    # π§ Kernl
    ### Operational memory for AI agents

    Kernl compiles how your company actually decides things β from Slack, SOPs, and tickets β into an executable skills file.
    Any agent. Any task. Correct every time.
    """)

    with gr.Row():
        # Left column: user inputs.
        with gr.Column(scale=1):
            scenario_input = gr.Textbox(
                label="Enter your business scenario",
                placeholder="Example: Enterprise customer, 18 months tenure, wants $1,200 refund",
                lines=4
            )
            # Toggle forwarded as the `with_brain` flag to the backend.
            with_brain_toggle = gr.Checkbox(
                label="π§ Use Company Brain (Kernl)",
                value=True,
                info="ON = Kernl uses compiled company knowledge. OFF = generic AI answer."
            )
            submit_btn = gr.Button("Ask Kernl", variant="primary", size="lg")

        # Right column: Markdown-rendered response area.
        with gr.Column(scale=2):
            output_box = gr.Markdown(label="Kernl's Response", value="*Your answer will appear here...*")

    # Copy-paste example scenarios and footer links.
    gr.Markdown("""
    ---
    ### Try these example scenarios (copy & paste):

    - `Enterprise customer, 18 months tenure, wants $1,200 refund`
    - `Annual plan customer, day 10 of subscription, $300 refund requested`
    - `Customer reporting P0 bug on dashboard, enterprise plan`
    - `Customer showing 3 churn signals in last 30 days`
    - `Startup requesting 40% discount`

    ---
    **Built with AMD MI300X, vLLM, and LangGraph** | [GitHub](https://github.com/your-repo) | **Track 1: AI Agents & Agentic Workflows**
    """)

    # Wire the button: (scenario text, brain toggle) -> Markdown output.
    submit_btn.click(
        fn=query_kernl,
        inputs=[scenario_input, with_brain_toggle],
        outputs=output_box
    )
|
|
# Start the Gradio server; blocks until the process is stopped.
demo.launch()