# app.py
import time
import random
import gradio as gr
# -------------------------
# Minimal HyperLayer demo
# - mode: choose demo behavior
# - latency: simulate zero / low / higher latency for demo
# - examples: quick tryouts
# -------------------------
def simulate_processing(prompt: str, mode: str, latency_ms: int) -> str:
    """
    Simulate HyperLayer AI agent processing.
    - prompt: user input
    - mode: 'Explain', 'Summarize', 'Detect Intent', 'Tokenize', or 'Echo'
    - latency_ms: simulated processing time in milliseconds
    """
    # simulate processing delay
    time.sleep(max(0, latency_ms) / 1000.0)

    # create deterministic-ish fake outputs for the demo
    if not prompt.strip():
        return "Please enter a prompt to see simulated HyperLayer output."

    base = f"[Mode: {mode}]"
    if mode == "Explain":
        out = (
            f"{base} Explanation: HyperLayer interprets your input and returns a "
            f"concise, technical summary.\n\nInput: {prompt}\n\n"
            f"Summary: {prompt[:120]}... (simulated explanation)"
        )
    elif mode == "Summarize":
        out = f"{base} Summary: {prompt[:140]}... (simulated short summary)"
    elif mode == "Detect Intent":
        intents = ["query_price", "execute_trade", "get_balance", "unknown"]
        detected = random.choice(intents)
        out = f"{base} Detected intent = `{detected}` (confidence: {random.uniform(0.6, 0.99):.2f})"
    elif mode == "Tokenize":
        tokens = prompt.split()
        out = f"{base} Tokens ({len(tokens)}): " + ", ".join(tokens[:20])
    else:
        out = f"{base} Echo: {prompt}"

    # append a simulated metadata block
    meta = (
        f"\n\n---\nSimulated latency: {latency_ms} ms • Node: x402-demo-01 • "
        f"timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}"
    )
    return out + meta
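
# -------------------------
# Production sketch (assumption): the Notes panel below suggests replacing the
# simulation with real RPC calls. This helper shows what a Solana JSON-RPC
# balance lookup could look like; it assumes `requests` is listed in
# requirements.txt and is not wired into the UI. The endpoint URL and the
# helper name are illustrative, not part of the original app.
# -------------------------
def fetch_sol_balance(pubkey: str, rpc_url: str = "https://api.mainnet-beta.solana.com") -> float:
    """Fetch a wallet's SOL balance via the standard `getBalance` JSON-RPC method."""
    import requests  # local import so the demo still runs without the dependency

    payload = {"jsonrpc": "2.0", "id": 1, "method": "getBalance", "params": [pubkey]}
    resp = requests.post(rpc_url, json=payload, timeout=10)
    resp.raise_for_status()
    lamports = resp.json()["result"]["value"]
    return lamports / 1_000_000_000  # lamports -> SOL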
# Gradio UI details
title = "🛰️ HyperLayer (x402) — Demo Playground"
description = """
**HyperLayer demo**: a lightweight simulation of zero-latency AI agent responses on Solana-native infrastructure.
Use the controls to pick a mode, set the simulated latency, and try the sample prompts.
*This Space is a demo-only simulation for presentation / hackathon purposes.*
"""
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}\n\n{description}")

    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(lines=4, label="Prompt / Data stream", placeholder="Type a request for the AI agent...")
            mode = gr.Dropdown(choices=["Explain", "Summarize", "Detect Intent", "Tokenize", "Echo"], value="Explain", label="Mode")
            latency = gr.Slider(minimum=0, maximum=2000, step=50, value=50, label="Simulated latency (ms)")
            run_btn = gr.Button("Run Demo")
            gr.Examples(
                examples=[
                    ["Evaluate arbitrage opportunities between market A and B", "Explain", 50],
                    ["Summarize last 24h orderbook activity for X token", "Summarize", 120],
                    ["transfer 0.5 SOL to 0xabc...z", "Detect Intent", 30],
                    ["tokenize: buy 100 sell 50", "Tokenize", 0],
                ],
                inputs=[prompt, mode, latency],
                label="Try examples",
            )
        with gr.Column(scale=1):
            output = gr.Textbox(lines=12, label="Agent Output (simulated)")
            gr.Markdown(
                "**Notes**\n"
                "- This is a demo to showcase how real-time agent responses would look.\n"
                "- Replace the simulation with real RPC / model calls for production."
            )

    def run(prompt_text, mode_val, latency_val):
        return simulate_processing(prompt_text, mode_val, int(latency_val))

    run_btn.click(fn=run, inputs=[prompt, mode, latency], outputs=[output])
    # pressing Enter in the prompt box triggers the same handler
    prompt.submit(fn=run, inputs=[prompt, mode, latency], outputs=[output])
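
    # Optional (assumption): enable Gradio's request queue so long simulated
    # latencies don't block concurrent visitors; uncomment to try it.
    # demo.queue()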

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)

# -------------------------
# Alternative: serve a real model instead of the simulation.
# Kept commented out: "x402_hyperlayer_model" is a placeholder model id, and
# running this block as-is would shadow the Blocks demo above and launch a
# second app at import time.
# -------------------------
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# model_id = "x402_hyperlayer_model"  # placeholder: point at a real checkpoint
# tokenizer = AutoTokenizer.from_pretrained(model_id)
# model = AutoModelForCausalLM.from_pretrained(model_id)
#
# def chat(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt")
#     outputs = model.generate(**inputs, max_new_tokens=100)
#     return tokenizer.decode(outputs[0], skip_special_tokens=True)
#
# demo = gr.Interface(fn=chat, inputs="text", outputs="text", title="X402 HyperLayer Model")
# demo.launch()
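
# Assumed dependencies for this Space (requirements.txt): gradio, plus
# requests / transformers / torch if the optional snippets above are enabled.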