import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
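
# Seq2seq chatbot checkpoint loaded from the Hugging Face Hub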
MODEL_ID = "WarTitan2077/CiC-ChatBot"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
model.eval()


def ask_cic(role, name, contact, message):
    """Build a role-aware prompt, run the model, and return the generated answer."""
    prompt = (
        f"Role: {role}\n"
        f"Name: {name}\n"
        f"Contact: {contact}\n"
        f"Message: {message}\n\n"
        f"Answer:"
    )

    # Tokenize, truncating long messages to the model's 512-token input limit
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=512
    )

    # Beam-search generation; no gradients are needed at inference time
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=200,
            num_beams=4,
            early_stopping=True,
            no_repeat_ngram_size=3
        )

    decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # Strip any prompt echo so only the text after "Answer:" is returned
    if "Answer:" in decoded:
        decoded = decoded.split("Answer:", 1)[1].strip()

    return decoded
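
# Example call for quick local testing (hypothetical inputs):
#   ask_cic("Student", "Ada", "ada@example.com", "When does the next program start?")
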
# -------- Gradio UI --------
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Coding-In-Color Role-Based Assistant")

    role = gr.Dropdown(
        ["Guest", "Student", "Parent", "Staff"],
        label="Role",
        value="Guest"
    )
    name = gr.Textbox(label="Name", placeholder="Optional")
    contact = gr.Textbox(label="Email or Phone (Optional)")
    message = gr.Textbox(
        label="Message",
        placeholder="Ask your question here...",
        lines=4
    )
    output = gr.Textbox(label="Response", lines=6)
    submit = gr.Button("Submit")

    submit.click(
        ask_cic,
        inputs=[role, name, contact, message],
        outputs=output
    )

demo.launch()