# Hugging Face Space page header (status: Sleeping) — residue from page scrape
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hugging Face Hub repo that hosts the fine-tuned seq2seq chatbot.
MODEL_ID = "WarTitan2077/CiC-ChatBot"

# Load tokenizer and model once at module import; inference-only,
# so the model is switched to eval mode (disables dropout etc.).
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
model.eval()
def ask_cic(role, name, contact, message):
    """Generate a chatbot reply for one form submission.

    Builds a structured prompt from the four UI fields, runs beam-search
    generation on the module-level seq2seq model, and returns the decoded
    answer with any echoed prompt prefix removed.

    Args:
        role: Selected user role (e.g. "Guest", "Student").
        name: Optional user name.
        contact: Optional email/phone string.
        message: The user's question.

    Returns:
        The model's answer as plain text.
    """
    query = (
        f"Role: {role}\n"
        f"Name: {name}\n"
        f"Contact: {contact}\n"
        f"Message: {message}\n\n"
        f"Answer:"
    )
    # Tokenize with truncation so very long messages still fit the encoder.
    encoded = tokenizer(query, return_tensors="pt", truncation=True, max_length=512)

    # Inference only — no gradients needed.
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            max_new_tokens=200,
            num_beams=4,
            early_stopping=True,
            no_repeat_ngram_size=3,
        )

    reply = tokenizer.decode(generated[0], skip_special_tokens=True)

    # Strip prompt echo: if the model repeats the "Answer:" marker,
    # keep only the text after its first occurrence.
    _, marker, tail = reply.partition("Answer:")
    if marker:
        reply = tail.strip()
    return reply
# -------- Gradio UI --------
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Coding-In-Color Role-Based Assistant")

    # --- input widgets ---
    role = gr.Dropdown(
        ["Guest", "Student", "Parent", "Staff"],
        label="Role",
        value="Guest",
    )
    name = gr.Textbox(label="Name", placeholder="Optional")
    contact = gr.Textbox(label="Email or Phone (Optional)")
    message = gr.Textbox(
        label="Message",
        placeholder="Ask your question here...",
        lines=4,
    )

    # --- output + trigger ---
    output = gr.Textbox(label="Response", lines=6)
    submit = gr.Button("Submit")

    # Wire the button to the inference function.
    submit.click(
        ask_cic,
        inputs=[role, name, contact, message],
        outputs=output,
    )

demo.launch()