File size: 1,729 Bytes
93c96f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import pickle
import streamlit as st
import os
import numpy as np

# 💡 Custom tokenizer — must be importable so the unpickled TF-IDF vectorizer
# can find it by name.
def custom_tokenizer(text):
    """Tokenize *text* by lowercasing and splitting on whitespace.

    Must reproduce the exact tokenizer logic used when the TF-IDF
    vectorizer was trained, otherwise unpickling/transform will misbehave.
    """
    lowered = text.lower()
    return lowered.split()



# 🔃 Load the pickled training artifacts: TF-IDF vectorizer, classifier,
# and multi-label binarizer. Any failure aborts the app with an error banner.
# NOTE(review): pickle.load is unsafe on untrusted files — these must be
# trusted local training artifacts.
try:
    _artifacts = {}
    for _var, _path in (
        ("vectorizer", "tfidf (1).pkl"),
        ("model", "model (7).pkl"),
        ("mlb", "mlb (2).pkl"),
    ):
        with open(_path, "rb") as fh:
            _artifacts[_var] = pickle.load(fh)

    vectorizer = _artifacts["vectorizer"]
    model = _artifacts["model"]
    mlb = _artifacts["mlb"]

except Exception as e:
    # Surface the failure in the UI and halt — nothing below can run
    # without the three artifacts.
    st.error(f"❌ Error loading model files: {str(e)}")
    st.stop()

# 🧠 Prediction function
def predict_tags(title, description):
    """Predict Stack Overflow tags for a question.

    Parameters
    ----------
    title : str
        The question title entered by the user.
    description : str
        The question body/description entered by the user.

    Returns
    -------
    str
        A user-facing markdown message: the predicted tags, a warning for
        empty input, an info message when no tags are predicted, or an
        error message if prediction fails.
    """
    try:
        # Require both fields; whitespace-only input counts as empty.
        if not title.strip() or not description.strip():
            return "⚠️ Please enter both title and description."

        # Concatenate title + description, matching the training-time input.
        input_text = title + " " + description
        input_vector = vectorizer.transform([input_text])
        prediction = model.predict(input_vector)
        # inverse_transform returns a list of tag tuples, one per sample.
        predicted_tags = mlb.inverse_transform(prediction)

        # Fix: removed leftover debug `st.write(predicted_tags)` that dumped
        # the raw inverse_transform output into the UI on every prediction.
        if predicted_tags and predicted_tags[0]:
            return "✅ Predicted Tags: " + ", ".join(predicted_tags[0])
        return "ℹ️ No tags predicted. Try refining your question."

    except Exception as e:
        return f"❌ Error during prediction: {str(e)}"

# 🚀 Streamlit UI — page header and input widgets.
st.title("🔖 Stack Overflow Tags Predictor")
st.markdown("Enter a question title and description to predict relevant tags.")

# Free-text inputs for the two fields predict_tags() requires.
title = st.text_input("📌 Enter Question Title")
description = st.text_area("📝 Enter Question Description", height=150)

# On click, run the prediction and render the returned markdown message
# (tags, a warning, or an error string).
if st.button("Predict Tags"):
    result = predict_tags(title, description)
    st.markdown(result)