"""Streamlit chatbot that classifies a user's check-in text with a
fine-tuned sequence-classification model and shows canned feedback.

Run with: streamlit run <this_file>.py
"""

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Streamlit app layout
st.set_page_config(page_title="Check-In Classifier", page_icon="📝")
st.title("📝 Check-In Classifier Chatbot")
st.write("Classify your check-in and get feedback!")


# Load your model and tokenizer from Hugging Face.
# @st.cache_resource ensures the (expensive) download/load happens once per
# server process instead of on every script rerun.
@st.cache_resource
def load_model():
    """Download and return the (tokenizer, model) pair from the Hub."""
    model_name = "SleepyTerr/checkin-classifier"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tokenizer, model


tokenizer, model = load_model()

# Mapping from the model's class indices to human-readable ratings.
# NOTE(review): assumes the fine-tuned model was trained with exactly this
# 5-label ordering — confirm against the model card.
label_map = {0: "Good", 1: "Average", 2: "Bad", 3: "Repetitive", 4: "Great"}

# Canned feedback per rating; defined once at module level rather than
# rebuilt inside the button handler on every rerun.
feedback = {
    "Good": "That's a good check-in!",
    "Average": "That's an average check-in, still room for improvement though.",
    "Bad": "Ehh... Not enough detail. It seems thrown together and like you didn't do anything. Do better.",
    "Repetitive": "Not good or bad, just explain better and stop repeating the same thing to make it longer..",
    "Great": "I have no feedback, your check-in is amazing!"
}

# User input
checkin = st.text_area("Enter your check-in:")

# Prediction and feedback
if st.button("Classify"):
    if checkin.strip():
        # Tokenize and run a single forward pass; no_grad avoids building
        # the autograd graph since we only need inference.
        inputs = tokenizer(checkin, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = model(**inputs).logits
        prediction = torch.argmax(logits, dim=-1).item()
        rating = label_map[prediction]

        st.success(f"Your check-in was rated as: **{rating}**")
        st.info(feedback[rating])
    else:
        # Empty / whitespace-only input: prompt the user instead of classifying.
        st.warning("Please enter a check-in to classify.")