ZENLLC committed on
Commit
11af061
·
verified ·
1 Parent(s): c3c59d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -43
app.py CHANGED
@@ -1,42 +1,23 @@
1
- import gradio as gr, json, plotly.graph_objects as go, torch
2
  from transformers import pipeline
 
3
 
4
  # ----------------------------
5
- # Detect device
6
  # ----------------------------
7
- device = "cuda" if torch.cuda.is_available() else "cpu"
8
 
9
- # ----------------------------
10
- # Load lightweight text model
11
- # ----------------------------
12
- text_model_name = "google/flan-t5-small" # tiny, CPU-friendly
13
- chat_model = pipeline("text2text-generation", model=text_model_name, device=0 if device=="cuda" else -1)
14
-
15
- # ----------------------------
16
- # Try to load Stable Diffusion (only if GPU)
17
- # ----------------------------
18
- sd_model = None
19
- if device == "cuda":
20
- try:
21
- from diffusers import StableDiffusionPipeline
22
- sd_model = StableDiffusionPipeline.from_pretrained(
23
- "stabilityai/stable-diffusion-2-1-base"
24
- ).to(device)
25
- except Exception as e:
26
- print("⚠️ Could not load Stable Diffusion:", e)
27
- sd_model = None
28
-
29
- # ----------------------------
30
- # Core assistant logic
31
- # ----------------------------
32
  SYSTEM_PROMPT = """You are ZEN Research Assistant.
33
- You can respond in ONE of these forms:
34
  - Image β†’ {"type":"image","prompt":"<prompt>"}
35
  - Chart β†’ {"type":"chart","title":"<chart title>","data":[{"x":[1,2,3], "y":[2,4,6], "label":"Example"}]}
36
  - Simulation β†’ {"type":"simulation","topic":"<title>","steps":["...", "..."]}
37
- - Text β†’ plain conversation.
38
  """
39
 
 
 
 
40
  def query_llm(prompt, history, persona):
41
  input_text = SYSTEM_PROMPT
42
  if persona != "Default":
@@ -45,9 +26,20 @@ def query_llm(prompt, history, persona):
45
  input_text += f"User: {u}\nAssistant: {a}\n"
46
  input_text += f"User: {prompt}\nAssistant:"
47
 
48
- out = chat_model(input_text, max_new_tokens=256)
49
  return out[0]["generated_text"].strip()
50
 
 
 
 
 
 
 
 
 
 
 
 
51
  def multimodal_chat(user_msg, history, persona):
52
  history = history or []
53
  assistant_content = query_llm(user_msg, history, persona)
@@ -57,11 +49,8 @@ def multimodal_chat(user_msg, history, persona):
57
  parsed = json.loads(assistant_content)
58
 
59
  if parsed.get("type") == "image":
60
- if sd_model is not None:
61
- img = sd_model(parsed["prompt"]).images[0]
62
- history.append([user_msg, "πŸ–ΌοΈ Generated image below."])
63
- else:
64
- history.append([user_msg, "⚠️ Image generation requires GPU."])
65
 
66
  elif parsed.get("type") == "chart":
67
  fig = go.Figure()
@@ -79,7 +68,8 @@ def multimodal_chat(user_msg, history, persona):
79
  else:
80
  history.append([user_msg, assistant_content])
81
 
82
- except (json.JSONDecodeError, KeyError, TypeError):
 
83
  history.append([user_msg, assistant_content])
84
 
85
  return history, img, fig
@@ -88,14 +78,9 @@ def multimodal_chat(user_msg, history, persona):
88
  # Gradio UI
89
  # ----------------------------
90
  with gr.Blocks(css="style.css") as demo:
91
- gr.Markdown("🧠 **ZEN Research Lab (Light Mode)**", elem_id="zen-header")
92
-
93
- cap_text = "βœ… Text βœ… Charts βœ… Simulation"
94
- if sd_model is not None:
95
- cap_text += " βœ… Images"
96
- else:
97
- cap_text += " ❌ Images (GPU required)"
98
- gr.Markdown(cap_text)
99
 
100
  persona = gr.Dropdown(["Default","Analyst","Artist","Futurist","Philosopher"], label="Mode", value="Default")
101
  chatbot = gr.Chatbot(label="Conversation", height=400)
 
1
+ import gradio as gr, json, plotly.graph_objects as go
2
  from transformers import pipeline
3
+ from PIL import Image, ImageDraw, ImageFont
4
 
5
  # ----------------------------
6
+ # Load a tiny text model (works on cpu-basic)
7
  # ----------------------------
8
+ chat_model = pipeline("text-generation", model="distilgpt2", device=-1)
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  SYSTEM_PROMPT = """You are ZEN Research Assistant.
11
+ Reply in ONE of these forms:
12
  - Image β†’ {"type":"image","prompt":"<prompt>"}
13
  - Chart β†’ {"type":"chart","title":"<chart title>","data":[{"x":[1,2,3], "y":[2,4,6], "label":"Example"}]}
14
  - Simulation β†’ {"type":"simulation","topic":"<title>","steps":["...", "..."]}
15
+ - Text β†’ plain explanation or reasoning.
16
  """
17
 
18
+ # ----------------------------
19
+ # Helpers
20
+ # ----------------------------
21
  def query_llm(prompt, history, persona):
22
  input_text = SYSTEM_PROMPT
23
  if persona != "Default":
 
26
  input_text += f"User: {u}\nAssistant: {a}\n"
27
  input_text += f"User: {prompt}\nAssistant:"
28
 
29
+ out = chat_model(input_text, max_new_tokens=150, do_sample=True, temperature=0.7)
30
  return out[0]["generated_text"].strip()
31
 
32
def make_placeholder_image(prompt: str):
    """Render a 512x512 placeholder image with *prompt* written on it.

    Stands in for a real diffusion model so the app runs on CPU-only
    (cpu-basic) hardware: dark-blue background, light text naming the
    requested prompt.

    Args:
        prompt: The image description requested by the model/user.

    Returns:
        A ``PIL.Image.Image`` in RGB mode, 512x512 pixels.
    """
    img = Image.new("RGB", (512, 512), color=(30, 30, 60))
    draw = ImageDraw.Draw(img)
    try:
        # DejaVuSans ships with most Linux base images; if it is missing,
        # fall back to Pillow's built-in bitmap font (font=None).
        font = ImageFont.truetype("DejaVuSans.ttf", 22)
    except OSError:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; ImageFont.truetype raises OSError on a missing font.
        font = None
    draw.text((20, 20), f"[Sketch of: {prompt}]", fill=(200, 200, 255), font=font)
    return img
42
+
43
  def multimodal_chat(user_msg, history, persona):
44
  history = history or []
45
  assistant_content = query_llm(user_msg, history, persona)
 
49
  parsed = json.loads(assistant_content)
50
 
51
  if parsed.get("type") == "image":
52
+ img = make_placeholder_image(parsed["prompt"])
53
+ history.append([user_msg, f"πŸ–ΌοΈ (Placeholder image for: {parsed['prompt']})"])
 
 
 
54
 
55
  elif parsed.get("type") == "chart":
56
  fig = go.Figure()
 
68
  else:
69
  history.append([user_msg, assistant_content])
70
 
71
+ except Exception:
72
+ # fallback: plain text
73
  history.append([user_msg, assistant_content])
74
 
75
  return history, img, fig
 
78
  # Gradio UI
79
  # ----------------------------
80
  with gr.Blocks(css="style.css") as demo:
81
+ gr.Markdown("🧠 **ZEN Research Lab (Guaranteed-to-Run Edition)**", elem_id="zen-header")
82
+
83
+ gr.Markdown("βœ… Text βœ… Charts βœ… Simulation βœ… Placeholder Images (no GPU needed)")
 
 
 
 
 
84
 
85
  persona = gr.Dropdown(["Default","Analyst","Artist","Futurist","Philosopher"], label="Mode", value="Default")
86
  chatbot = gr.Chatbot(label="Conversation", height=400)