# WellebeingDT / workingapp.py
# Author: Rahatara
# Renamed from app.py to workingapp.py (commit 3294fcd, verified)
import gradio as gr
import os
import re
import base64
import io
from PIL import Image
from groq import Groq
# Initialize Groq client.
# NOTE(review): os.environ["GROQ_API_KEY"] raises KeyError at import time when the
# key is unset — presumably intentional fail-fast for the hosting environment; confirm.
client = Groq(api_key=os.environ["GROQ_API_KEY"])
# Define slot templates.
# Each "slot" is one facet of the assistant's persona/context. Per slot:
#   template  - prompt sent to the LLM with {UserInput} substituted, to derive the value
#   start_tag / end_tag - delimiters the LLM is told to wrap its answer in;
#                         parsed back out by extract_slot()
#   default   - fallback used when the tags are missing from the LLM response
slot_templates = {
    "UQ": {"template": "Extract the main task and goal from the following input: {UserInput}. Output only the query between <UQ> and </UQ>.", "start_tag": "<UQ>", "end_tag": "</UQ>", "default": "Understand well-being pattern."},
    "CP": {"template": "Identify any personal or situational context from the following input: {UserInput}. Output the context between <CP> and </CP>. If none, output 'No context provided'.", "start_tag": "<CP>", "end_tag": "</CP>", "default": "No context provided."},
    "J": {"template": "Define ethical guidelines for responding to: {UserInput}. Output between <J> and </J>.", "start_tag": "<J>", "end_tag": "</J>", "default": "Promote well-being."},
    "ROLE": {"template": "Determine assistant role for: {UserInput}. Output between <ROLE> and </ROLE>.", "start_tag": "<ROLE>", "end_tag": "</ROLE>", "default": "well-being assistant"},
    "TONE": {"template": "Identify tone for: {UserInput}. Output between <TONE> and </TONE>.", "start_tag": "<TONE>", "end_tag": "</TONE>", "default": "supportive"},
    "FILT": {"template": "Specify content filtering constraints for: {UserInput}. Output between <FILT> and </FILT>.", "start_tag": "<FILT>", "end_tag": "</FILT>", "default": "Comply with ethical standards"},
    "FE": {"template": "Generate few-shot examples for: {UserInput}. Output between <FE> and </FE>.", "start_tag": "<FE>", "end_tag": "</FE>", "default": "[Q: How can I sleep better? A: Maintain a consistent bedtime.]"}
}
# Global state, mutated in place by setup_from_input() and chat_with_image().
# NOTE(review): shared module-level state means all concurrent Gradio users share
# one conversation — confirm single-user deployment is intended.
slots = {}
conversation = []
# Extract slot value
def extract_slot(response, slot):
    """Pull the text between a slot's start/end tags out of an LLM response.

    Searches `response` for the first span delimited by `slot["start_tag"]`
    and `slot["end_tag"]` (DOTALL, so the span may cross newlines) and returns
    it stripped of surrounding whitespace. Falls back to `slot["default"]`
    when the tags are absent.
    """
    pattern = f"{re.escape(slot['start_tag'])}(.*?){re.escape(slot['end_tag'])}"
    found = re.search(pattern, response, re.DOTALL)
    if found is None:
        return slot["default"]
    return found.group(1).strip()
# Call LLM
def call_llm(prompt):
    """Send a single-turn user prompt to the Groq chat model and return the reply text."""
    completion = client.chat.completions.create(
        model="meta-llama/llama-4-scout-17b-16e-instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,
        max_tokens=512,
    )
    return completion.choices[0].message.content
# Convert image to data URL
def image_to_data_url(pil_image):
    """Serialize a PIL image to a base64-encoded JPEG data URL for the chat API."""
    buffer = io.BytesIO()
    pil_image.save(buffer, format="JPEG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return "data:image/jpeg;base64," + encoded
# Slot setup
def setup_from_input(user_input):
    """Derive every persona/context slot from the user's free-text input,
    build the system and user prompts, seed the shared conversation, and
    fetch the assistant's opening reply.

    Returns a 10-tuple of strings: the seven slot values (UQ, CP, J, ROLE,
    TONE, FILT, FE) followed by the user prompt, system instruction, and
    the assistant's first reply.
    """
    global slots, conversation
    # One LLM round-trip per slot, parsed back out of its tag pair.
    slots = {}
    for name, spec in slot_templates.items():
        raw = call_llm(spec["template"].format(UserInput=user_input))
        slots[name] = extract_slot(raw, spec)
    system_msg = (
        f"You are a {slots['ROLE']}. Respond in a {slots['TONE']} tone. "
        f"Follow these constraints: {slots['FILT']}. Use these examples: {slots['FE']}"
    )
    user_msg = (
        f"The user wants: {slots['UQ']}\n"
        f"Context: {slots['CP']}\n"
        f"Justify advice with: {slots['J']}"
    )
    conversation = [
        {"role": "system", "content": system_msg},
        {"role": "user", "content": user_msg},
    ]
    # First assistant turn, generated from the freshly-built prompts.
    reply = client.chat.completions.create(
        model="meta-llama/llama-4-scout-17b-16e-instruct",
        messages=conversation,
        temperature=0.5,
        max_tokens=512,
    ).choices[0].message.content
    conversation.append({"role": "assistant", "content": reply})
    ordered_slots = [slots[key] for key in ("UQ", "CP", "J", "ROLE", "TONE", "FILT", "FE")]
    return tuple(str(value) for value in ordered_slots + [user_msg, system_msg, reply])
# Chat handler
def chat_with_image(user_text, user_image):
    """Append the user's turn (text plus optional image) to the shared
    conversation and stream back the model's reply as one string.

    Requires setup_from_input() to have run first so `conversation` is seeded;
    otherwise returns a prompt asking the user to initialize.
    """
    if not conversation:
        return "Please initialize the assistant first."
    content = [{"type": "text", "text": user_text}]
    if user_image:
        # Images are sent inline as base64 data URLs.
        content.append({"type": "image_url", "image_url": {"url": image_to_data_url(user_image)}})
    conversation.append({"role": "user", "content": content})
    stream = client.chat.completions.create(
        model="meta-llama/llama-4-scout-17b-16e-instruct",
        messages=conversation,
        temperature=0.7,
        max_tokens=1024,
        stream=True,
    )
    # Accumulate streamed deltas; the final chunk's delta.content may be None.
    pieces = []
    for chunk in stream:
        pieces.append(chunk.choices[0].delta.content or "")
    reply = "".join(pieces)
    conversation.append({"role": "assistant", "content": reply})
    return reply
# UI: two-tab Gradio app. `demo` is the top-level Blocks object launched below.
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Responsible Well-Being Assistant")
    with gr.Tabs():
        # Tab 1: derive the slot values, build prompts, and seed the conversation.
        with gr.Tab("1️⃣ Setup Assistant"):
            user_input = gr.Textbox(label="User Input", placeholder="e.g., How can I improve my sleep?")
            setup_btn = gr.Button("Generate Slots + Prompts")
            with gr.Accordion("Slots"):
                # Display boxes for the seven extracted slots.
                uq = gr.Textbox(label="User Query")
                cp = gr.Textbox(label="Context")
                j = gr.Textbox(label="Justification")
                role = gr.Textbox(label="Role")
                tone = gr.Textbox(label="Tone")
                filt = gr.Textbox(label="Filter")
                fe = gr.Textbox(label="Few-shot")
            user_prompt = gr.Textbox(label="User Prompt")
            sys_instr = gr.Textbox(label="System Instruction")
            setup_response = gr.Textbox(label="Initial Assistant Reply", lines=6)
            # Output order must match the 10-tuple returned by setup_from_input().
            setup_btn.click(setup_from_input, inputs=user_input, outputs=[uq, cp, j, role, tone, filt, fe, user_prompt, sys_instr, setup_response])
        # Tab 2: ongoing multimodal chat; requires Tab 1 to have been run first.
        with gr.Tab("2️⃣ Multimodal Chat"):
            chat_input = gr.Textbox(label="Your Message")
            chat_image = gr.Image(type="pil", label="Upload Image (optional)")
            chat_btn = gr.Button("Send")
            chat_output = gr.Textbox(label="Assistant Reply", lines=6)
            chat_btn.click(chat_with_image, inputs=[chat_input, chat_image], outputs=chat_output)
# Entry point when run as a script (Hugging Face Spaces also picks up `demo`).
if __name__ == "__main__":
    demo.launch()