ZENLLC committed on
Commit
fd2b1fa
·
verified ·
1 Parent(s): 4db636e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -3
app.py CHANGED
@@ -14,7 +14,12 @@ if device == "cuda":
14
  else:
15
  text_model_name = "google/flan-t5-base" # CPU-friendly
16
 
17
- chat_model = pipeline("text-generation", model=text_model_name, device=0 if device=="cuda" else -1)
 
 
 
 
 
18
 
19
  # ----------------------------
20
  # Try to load Stable Diffusion (only if GPU)
@@ -53,7 +58,7 @@ def query_llm(prompt, history, persona):
53
  input_text += f"User: {prompt}\nAssistant:"
54
 
55
  out = chat_model(input_text, max_new_tokens=300, do_sample=True, temperature=0.7)
56
- return out[0]["generated_text"].split("Assistant:")[-1].strip()
57
 
58
 
59
  def multimodal_chat(user_msg, history, persona):
@@ -94,7 +99,15 @@ def multimodal_chat(user_msg, history, persona):
94
 
95
 
96
  with gr.Blocks(css="style.css") as demo:
97
- gr.Markdown("🧠 **ZEN Research Lab (Adaptive Edition)** — works everywhere, GPU unlocks extra powers", elem_id="zen-header")
 
 
 
 
 
 
 
 
98
 
99
  persona = gr.Dropdown(["Default","Analyst","Artist","Futurist","Philosopher"], label="Mode", value="Default")
100
  chatbot = gr.Chatbot(label="Conversation", height=400)
@@ -119,5 +132,18 @@ with gr.Blocks(css="style.css") as demo:
119
  user_msg.submit(respond, inputs=[user_msg, chatbot, persona],
120
  outputs=[chatbot, img_out, chart_out])
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  if __name__ == "__main__":
123
  demo.queue(max_size=50).launch()
 
14
  else:
15
  text_model_name = "google/flan-t5-base" # CPU-friendly
16
 
17
+ chat_model = pipeline(
18
+ "text-generation",
19
+ model=text_model_name,
20
+ device=0 if device=="cuda" else -1,
21
+ return_full_text=False
22
+ )
23
 
24
  # ----------------------------
25
  # Try to load Stable Diffusion (only if GPU)
 
58
  input_text += f"User: {prompt}\nAssistant:"
59
 
60
  out = chat_model(input_text, max_new_tokens=300, do_sample=True, temperature=0.7)
61
+ return out[0]["generated_text"].strip()
62
 
63
 
64
  def multimodal_chat(user_msg, history, persona):
 
99
 
100
 
101
  with gr.Blocks(css="style.css") as demo:
102
+ gr.Markdown("🧠 **ZEN Research Lab (Adaptive Edition)**", elem_id="zen-header")
103
+
104
+ # Capabilities banner
105
+ cap_text = "✅ Text ✅ Charts ✅ Simulation"
106
+ if sd_model is not None:
107
+ cap_text += " ✅ Images"
108
+ else:
109
+ cap_text += " ❌ Images (GPU required)"
110
+ gr.Markdown(cap_text)
111
 
112
  persona = gr.Dropdown(["Default","Analyst","Artist","Futurist","Philosopher"], label="Mode", value="Default")
113
  chatbot = gr.Chatbot(label="Conversation", height=400)
 
132
  user_msg.submit(respond, inputs=[user_msg, chatbot, persona],
133
  outputs=[chatbot, img_out, chart_out])
134
 
135
+ # Example starter buttons
136
+ with gr.Accordion("✨ Try these examples"):
137
+ with gr.Row():
138
+ gr.Examples(
139
+ examples=[
140
+ ["Draw a futuristic city skyline at night"],
141
+ ["Simulate first contact with an alien civilization"],
142
+ ["Make a chart of AI adoption from 2010 to 2030"],
143
+ ["Explain quantum entanglement in simple terms"],
144
+ ],
145
+ inputs=[user_msg]
146
+ )
147
+
148
  if __name__ == "__main__":
149
  demo.queue(max_size=50).launch()