import os

import streamlit as st
from ctransformers import AutoModelForCausalLM
from huggingface_hub import hf_hub_download
st.title("βœ… Hugging Face Streamlit Test")
st.write("Hello! If you see this, the app is working!")
st.write("Server running at:", os.getenv("STREAMLIT_SERVER_ADDRESS", "Unknown"))
if st.button("Check Status"):
st.success("The app is running correctly!")
# Define model repo and filename
MODEL_REPO = "TheBloke/Phi-2-GGUF"
MODEL_FILE = "phi-2.Q4_K_M.gguf" # Change based on the quantization version you want
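# Other quantizations in the same repo follow the phi-2.Q*.gguf naming pattern
# (e.g. Q5_K_M for higher quality, Q2_K for a smaller download); check the
# repo's file list for the exact names.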
# Define model path
MODEL_DIR = "./models"
MODEL_PATH = os.path.join(MODEL_DIR, MODEL_FILE)
# Ensure the model directory exists
os.makedirs(MODEL_DIR, exist_ok=True)
# Download the GGUF file into MODEL_DIR if it is not already there.
# (Loading the model via ctransformers just to trigger a download would cache
# it under the Hub's own directory layout, so MODEL_PATH would never exist and
# the file would be re-fetched on every run; hf_hub_download with local_dir
# places the file exactly where the existence check looks.)
def download_model():
    if not os.path.exists(MODEL_PATH):
        st.info(f"Downloading {MODEL_FILE} from {MODEL_REPO}...")
        hf_hub_download(
            repo_id=MODEL_REPO,
            filename=MODEL_FILE,
            local_dir=MODEL_DIR,
        )
        st.success("Model downloaded successfully!")
    else:
        st.success("Model already exists. Loading...")
# Streamlit UI
st.title("πŸ”¬ Phi-2 AI Chatbot")
st.markdown("A lightweight chatbot running Phi-2 on Hugging Face Spaces!")
# Download model if not available
download_model()
# Load the model. st.cache_resource keeps the loaded model across Streamlit
# reruns, so it is read from disk only once per server process.
@st.cache_resource
def load_model():
    return AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,  # load the downloaded .gguf file directly
        model_type="phi2",  # note: requires a ctransformers build that supports this architecture
    )

st.info("Loading model...")
model = load_model()
st.success("Model loaded successfully! 🎉")
# Chat UI
user_input = st.text_input("💬 Ask something:")

if user_input:
    response = model(user_input)
    st.write("🤖 AI Response:", response)