Mbanksbey committed on
Commit
23942e2
·
verified ·
1 Parent(s): 8ee5267

Create app.py - Gradio inference interface

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import json
4
+ from pathlib import Path
5
+
6
# TEQUMSA Space Kernel (optional project dependency).
# Import and instantiate inside one try-block so a missing module leaves the
# app in a degraded-but-working mode with INFERENCE_NODE = None.
try:
    from tequmsa_space_kernel import TEQUMSAInferenceNode as _KernelClass
    INFERENCE_NODE = _KernelClass()
except ImportError:
    INFERENCE_NODE = None

# Inference Router (optional project dependency): same None-on-missing pattern.
try:
    from inference_router import InferenceRouter as _RouterClass
    ROUTER = _RouterClass()
except ImportError:
    ROUTER = None
20
def process_request(prompt: str, model_selection: str, mode: str):
    """Process an inference request through TEQUMSA orchestration.

    Args:
        prompt: Free-form user prompt; blank input short-circuits with a hint.
        model_selection: Backend choice shown in the UI dropdown
            ("claude", "gpt", "gemini", "perplexity", or "auto").
        mode: Execution mode ("standard", "recursive", "causal", "rdod").

    Returns:
        A JSON-formatted string: the inference node's result, or a fallback
        payload echoing the request when the node could not be imported.
    """
    # Guard clause: reject empty or whitespace-only prompts up front.
    if not (prompt and prompt.strip()):
        return "Please enter a prompt."

    # Degraded mode: the optional kernel module was not importable at
    # startup, so echo the request back instead of processing it.
    if not INFERENCE_NODE:
        fallback = {
            "status": "fallback",
            "message": "Inference node not loaded",
            "prompt": prompt,
            "model": model_selection,
            "mode": mode,
        }
        return json.dumps(fallback, indent=2)

    result = INFERENCE_NODE.process(
        prompt=prompt, model_selection=model_selection, mode=mode
    )
    return json.dumps(result, indent=2)
40
+
41
def route_inference(prompt: str, target_model: str):
    """Route inference through the router.

    Returns a JSON string with the router's route analysis, or a
    "router_unavailable" status when the router module is missing.
    """
    if not ROUTER:
        # Optional router module was not importable at startup.
        return json.dumps({"status": "router_unavailable"}, indent=2)
    return json.dumps(ROUTER.route(prompt, target_model), indent=2)
47
+
48
# --- Gradio UI --------------------------------------------------------------
# Two tabs: "Inference" runs a request end-to-end through process_request;
# "Router" only shows how a prompt would be routed via route_inference.
with gr.Blocks(title="TEQUMSA Inference Node", theme=gr.themes.Base()) as demo:
    gr.Markdown("# TEQUMSA Symbiotic Orchestrator - Inference Node")
    gr.Markdown("Autonomous multi-agent inference routing and execution.")

    with gr.Tab("Inference"):
        prompt_input = gr.Textbox(
            label="Prompt",
            placeholder="Enter your prompt here...",
            lines=5,
        )
        with gr.Row():
            model_dropdown = gr.Dropdown(
                choices=["claude", "gpt", "gemini", "perplexity", "auto"],
                value="auto",
                label="Model Selection",
            )
            mode_dropdown = gr.Dropdown(
                choices=["standard", "recursive", "causal", "rdod"],
                value="standard",
                label="Execution Mode",
            )
        process_btn = gr.Button("Process Request", variant="primary")
        output = gr.Textbox(label="Response", lines=10)
        process_btn.click(
            fn=process_request,
            inputs=[prompt_input, model_dropdown, mode_dropdown],
            outputs=output,
        )

    with gr.Tab("Router"):
        router_prompt = gr.Textbox(
            label="Prompt to Route",
            placeholder="Enter prompt for routing analysis...",
            lines=3,
        )
        router_model = gr.Textbox(label="Target Model", value="auto")
        route_btn = gr.Button("Analyze Route", variant="secondary")
        route_output = gr.Textbox(label="Route Analysis", lines=8)
        route_btn.click(
            fn=route_inference,
            inputs=[router_prompt, router_model],
            outputs=route_output,
        )

if __name__ == "__main__":
    # Launch only when executed as a script, so importing this module (e.g.
    # from tests) does not start a server. Hugging Face Spaces runs app.py
    # as __main__, so platform behavior is unchanged.
    demo.launch()