import os
import streamlit as st
from ctransformers import AutoModelForCausalLM
from huggingface_hub import hf_hub_download

st.title("βœ… Hugging Face Streamlit Test")

st.write("Hello! If you see this, the app is working!")

st.write("Server running at:", os.getenv("STREAMLIT_SERVER_ADDRESS", "Unknown"))

if st.button("Check Status"):
    st.success("The app is running correctly!")

# Define model repo and filename
MODEL_REPO = "TheBloke/Phi-2-GGUF"
MODEL_FILE = "phi-2.Q4_K_M.gguf"  # Change based on the quantization version you want

# Define model path
MODEL_DIR = "./models"
MODEL_PATH = os.path.join(MODEL_DIR, MODEL_FILE)

# Ensure the model directory exists
os.makedirs(MODEL_DIR, exist_ok=True)

# Function to download the model file if it is not already present
def download_model():
    if not os.path.exists(MODEL_PATH):
        st.info(f"Downloading {MODEL_FILE} from {MODEL_REPO}...")
        # Assumption: fetch the single GGUF file with huggingface_hub so it
        # lands at MODEL_PATH; downloading via ctransformers' from_pretrained
        # with cache_dir stores it elsewhere, so the existence check above
        # would never find it and the file would be re-downloaded on each run.
        hf_hub_download(
            repo_id=MODEL_REPO,
            filename=MODEL_FILE,
            local_dir=MODEL_DIR,
        )
        st.success("Model downloaded successfully!")
    else:
        st.success("Model already exists. Loading...")

# Streamlit UI
st.title("πŸ”¬ Phi-2 AI Chatbot")
st.markdown("A lightweight chatbot running Phi-2 on Hugging Face Spaces!")

# Download model if not available
download_model()

# Load the model from the local GGUF file, cached across Streamlit reruns so
# the weights are not reloaded every time a widget changes
@st.cache_resource
def load_model():
    return AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        model_type="phi2"
    )

st.info("Loading model...")
model = load_model()
st.success("Model loaded successfully! πŸŽ‰")

# Chat UI
user_input = st.text_input("πŸ’¬ Ask something:")
if user_input:
    response = model(user_input)
    st.write("πŸ€– AI Response:", response)