ZENLLC committed on
Commit
92a0a6b
·
verified ·
1 Parent(s): 26c245c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Dependencies ------------------------------------------------------------
# One import per line, grouped stdlib / third-party (PEP 8); the original
# jammed four imports onto a single statement.
import json

import gradio as gr
import plotly.graph_objects as go
import torch
from diffusers import StableDiffusionPipeline
from transformers import pipeline

# ----------------------------
# Load models once on startup
# ----------------------------
# Prefer the GPU when one is visible to torch; otherwise run on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Text model (fast chat). `pipeline` takes a CUDA device index, or -1 for CPU.
chat_model = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-beta",
    device=0 if device == "cuda" else -1,
)

# Image model (stable diffusion). fp16 halves GPU memory; fp32 is required on CPU.
sd_model = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
+
19
+
20
+ SYSTEM_PROMPT = """You are ZEN Research Assistant.
21
+ You can respond in ONE of these forms:
22
+ - Image → {"type":"image","prompt":"<prompt>"}
23
+ - Chart → {"type":"chart","title":"<chart title>","data":[{"x":[...], "y":[...], "label":"<series>"}]}
24
+ - Simulation → {"type":"simulation","topic":"<title>","steps":["...", "..."]}
25
+ - Text → plain conversation, explanation, or reasoning.
26
+
27
+ Rules:
28
+ - Use JSON ONLY for image, chart, or simulation.
29
+ - Simulation = imaginative thought experiment, 3–6 steps.
30
+ - If not sure, default to conversational text.
31
+ """
32
+
33
def query_llm(prompt, history, persona):
    """Generate one assistant reply for *prompt*, conditioned on *history*.

    Args:
        prompt: The newest user message.
        history: List of (user, assistant) pairs from earlier turns.
        persona: Persona name; any value other than "Default" is injected
            into the transcript as an extra instruction line.

    Returns:
        The model's new completion text — everything after the final
        "Assistant:" marker, stripped of surrounding whitespace.
    """
    # Build the transcript with a list + "".join rather than repeated
    # string += (the original was quadratic in the number of turns).
    parts = [SYSTEM_PROMPT]
    if persona != "Default":
        parts.append(f"\nPersona: {persona}\n")
    for u, a in history:
        parts.append(f"User: {u}\nAssistant: {a}\n")
    parts.append(f"User: {prompt}\nAssistant:")
    input_text = "".join(parts)

    out = chat_model(input_text, max_new_tokens=400, do_sample=True, temperature=0.7)
    # The pipeline echoes the whole prompt back; keep only the text after
    # the last "Assistant:" marker, which is the fresh completion.
    return out[0]["generated_text"].split("Assistant:")[-1].strip()
44
+
45
+
46
def multimodal_chat(user_msg, history, persona):
    """Route one user turn to text, image, chart, or simulation output.

    Asks the LLM for a reply, then interprets it: if the reply parses as a
    JSON object with a recognized "type", render the corresponding artifact;
    otherwise treat it as plain conversation text.

    Returns:
        (history, image_or_None, plotly_figure_or_None)
    """
    history = history or []
    reply = query_llm(user_msg, history, persona)

    image, figure = None, None
    try:
        payload = json.loads(reply)
        kind = payload.get("type")

        if kind == "image":
            image = sd_model(payload["prompt"]).images[0]
            history.append([user_msg, "🖼️ Generated image below."])

        elif kind == "chart":
            figure = go.Figure()
            for series in payload["data"]:
                figure.add_trace(go.Scatter(
                    x=series["x"],
                    y=series["y"],
                    mode="lines+markers",
                    name=series.get("label", ""),
                ))
            figure.update_layout(title=payload.get("title", "Chart"))
            history.append([user_msg, payload.get("title", "Chart below")])

        elif kind == "simulation":
            bullet_steps = "\n".join(f"→ {step}" for step in payload["steps"])
            history.append(
                [user_msg, f"🔮 Simulation: {payload.get('topic','Exploration')}\n{bullet_steps}"]
            )

        else:
            # Valid JSON but not a structured type we know: show it verbatim.
            history.append([user_msg, reply])

    except (json.JSONDecodeError, KeyError, TypeError):
        # Plain-text reply, or a structured payload missing required keys:
        # fall back to showing the raw assistant text.
        history.append([user_msg, reply])

    return history, image, figure
78
+
79
+
80
+ with gr.Blocks(css="style.css") as demo:
81
+ gr.Markdown("🧠 **ZEN Research Lab (API-free Edition)** — Explore, simulate, and create", elem_id="zen-header")
82
+
83
+ persona = gr.Dropdown(["Default","Analyst","Artist","Futurist","Philosopher"], label="Mode", value="Default")
84
+ chatbot = gr.Chatbot(label="Conversation", height=400)
85
+
86
+ with gr.Row():
87
+ user_msg = gr.Textbox(placeholder="Ask me anything…", label="Your message", scale=4)
88
+ send_btn = gr.Button("Send", variant="primary")
89
+
90
+ img_out = gr.Image(label="Generated image")
91
+ chart_out = gr.Plot(label="Interactive chart")
92
+
93
+ def respond(user_msg, chat_history, persona):
94
+ chat_history, img, fig = multimodal_chat(user_msg, chat_history, persona)
95
+ return (
96
+ chat_history,
97
+ gr.update(value=img) if img else gr.update(value=None),
98
+ gr.update(value=fig) if fig else gr.update(value=None)
99
+ )
100
+
101
+ send_btn.click(respond, inputs=[user_msg, chatbot, persona],
102
+ outputs=[chatbot, img_out, chart_out])
103
+ user_msg.submit(respond, inputs=[user_msg, chatbot, persona],
104
+ outputs=[chatbot, img_out, chart_out])
105
+
106
+ if __name__ == "__main__":
107
+ demo.queue(max_size=50).launch()