X402hyperlayer committed on
Commit
9c8ba5b
·
verified ·
1 Parent(s): 90f1ebb

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -0
app.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import time
3
+ import random
4
+ import gradio as gr
5
+
6
+ # -------------------------
7
+ # Minimal HyperLayer demo
8
+ # - mode: choose demo behavior
9
+ # - latency: simulate zero / low / higher latency for demo
10
+ # - examples: quick tryouts
11
+ # -------------------------
12
+
13
def simulate_processing(prompt: str, mode: str, latency_ms: int):
    """Produce a fake HyperLayer agent response for the demo UI.

    Args:
        prompt: raw user input text.
        mode: one of 'Explain', 'Summarize', 'Detect Intent', 'Tokenize';
            any other value falls through to a plain echo.
        latency_ms: artificial processing delay in milliseconds
            (negative values are treated as zero).

    Returns:
        The simulated response text with a trailing metadata footer, or a
        short usage hint when the prompt is blank.
    """
    # Block for the requested duration to fake backend processing latency.
    time.sleep(max(0, latency_ms) / 1000.0)

    # A blank prompt short-circuits: no footer, just a hint to the user.
    if not prompt.strip():
        return "Please enter a prompt to see simulated HyperLayer output."

    tag = f"[Mode: {mode}]"

    if mode == "Explain":
        body = (
            f"{tag} Explanation: HyperLayer interprets your input and returns a concise, technical summary."
            f"\n\nInput: {prompt}\n\nSummary: {prompt[:120]}... (simulated explanation)"
        )
    elif mode == "Summarize":
        body = f"{tag} Summary: {prompt[:140]}... (simulated short summary)"
    elif mode == "Detect Intent":
        # Pretend intent classification: pick a label and a plausible score.
        detected = random.choice(["query_price", "execute_trade", "get_balance", "unknown"])
        body = f"{tag} Detected intent = `{detected}` (confidence: {random.uniform(0.6,0.99):.2f})"
    elif mode == "Tokenize":
        # Whitespace split; only the first 20 tokens are echoed back.
        pieces = prompt.split()
        body = f"{tag} Tokens ({len(pieces)}): " + ", ".join(pieces[:20])
    else:
        body = f"{tag} Echo: {prompt}"

    # Footer mimicking the node / timing metadata a real backend would attach.
    footer = f"\n\n---\nSimulated latency: {latency_ms} ms • Node: x402-demo-01 • timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}"
    return body + footer
46
+
47
+
48
# Gradio UI details: page title and the markdown blurb rendered at the top.
title = "🛰️ HyperLayer (x402) — Demo Playground"
description = """
**HyperLayer demo** — lightweight simulation of zero-latency AI agent responses on a Solana-native infra.
Use the controls to pick a mode, set simulated latency and try sample prompts.
*This Space shows a demo-only simulation for presentation / hackathon purposes.*
"""

# Two-column layout: inputs/controls on the left, simulated output on the right.
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}\n\n{description}")

    with gr.Row():
        with gr.Column(scale=2):
            # The three inputs fed to simulate_processing (prompt, mode, latency).
            prompt = gr.Textbox(lines=4, label="Prompt / Data stream", placeholder="Type a request for the AI agent...")
            mode = gr.Dropdown(choices=["Explain", "Summarize", "Detect Intent", "Tokenize", "Echo"], value="Explain", label="Mode")
            latency = gr.Slider(minimum=0, maximum=2000, step=50, value=50, label="Simulated latency (ms)")
            run_btn = gr.Button("Run Demo")
            # Clickable sample rows that prefill prompt / mode / latency.
            examples = gr.Examples(
                examples=[
                    ["Evaluate arbitrage opportunities between market A and B", "Explain", 50],
                    ["Summarize last 24h orderbook activity for X token", "Summarize", 120],
                    ["transfer 0.5 SOL to 0xabc...z", "Detect Intent", 30],
                    ["tokenize: buy 100 sell 50", "Tokenize", 0],
                ],
                inputs=[prompt, mode, latency],
                label="Try examples"
            )
        with gr.Column(scale=1):
            output = gr.Textbox(lines=12, label="Agent Output (simulated)")
            gr.Markdown("**Notes**\n- This is a demo to showcase how real-time agent responses would look.\n- Replace simulation with real RPC / model calls for production.")

    def run(prompt_text, mode_val, latency_val):
        # Thin adapter: Gradio sliders deliver floats, so coerce latency to int
        # before delegating to the simulation function above.
        return simulate_processing(prompt_text, mode_val, int(latency_val))

    # Button click runs the simulation and writes the result to the output box.
    run_btn.click(fn=run, inputs=[prompt, mode, latency], outputs=[output])

    # Pressing Enter in the prompt textbox triggers the same handler as the button.
    prompt.submit(fn=run, inputs=[prompt, mode, latency], outputs=[output])

if __name__ == "__main__":
    # Bind on all interfaces on port 7860 (the conventional HF Spaces port).
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)