import streamlit as st
import pandas as pd
import joblib
import numpy as np
import string
import nltk
from nltk.corpus import stopwords as stp
from nltk import pos_tag, word_tokenize as w, sent_tokenize as s
from nltk.stem import WordNetLemmatizer as wl

# Download necessary NLTK data (newer NLTK releases use the *_tab / *_eng resource names)
nltk.download('punkt')
nltk.download('punkt_tab')
nltk.download('averaged_perceptron_tagger_eng')
nltk.download('wordnet')
nltk.download('stopwords')


# === Cleaning Function ===
def sahi_karneka_function(x):
    """Clean raw text: tokenize, drop stopwords/punctuation, keep lemmatized nouns."""
    nouns = []
    li = []
    lem = wl()
    l = s(x)                      # split the input into sentences
    for i in l:
        d = w(i.lower())          # lowercase and split each sentence into word tokens
        for k in d:
            li.append(k)
    lw = len(li)
    j = 0
    # NOTE: the body of this loop was lost in the source; the version below is a
    # reconstruction based on the names set up above (stopwords, string.punctuation,
    # pos_tag, the lemmatizer and the `nouns` list), not the author's exact code.
    stop_words = set(stp.words('english'))
    tagged = pos_tag(li)          # POS-tag every token once
    while j < lw:
        word, tag = tagged[j]
        if word not in stop_words and word not in string.punctuation and tag.startswith('NN'):
            nouns.append(lem.lemmatize(word))   # keep lemmatized nouns only
        j += 1
    return " ".join(nouns)


# === Load saved artifacts ===
# NOTE: this block (and the UI / Steps 1-3 below) was garbled in the source and has
# been reconstructed; the file names, widget labels, and threshold value are
# placeholder assumptions — point them at your own artifacts and settings.
model = joblib.load("model.pkl")            # trained multi-label classifier
vectorizer = joblib.load("vectorizer.pkl")  # fitted text vectorizer (e.g. TF-IDF)
ml = joblib.load("mlb.pkl")                 # fitted MultiLabelBinarizer

# === Streamlit UI ===
st.title("Tag Prediction App")
user_text = st.text_area("Enter your text:", key="user_input")

threshold = 0.3  # assumed decision threshold; tune to your model

if st.button("Predict Tags") and user_text.strip():
    # Step 1: Clean the input text
    cleaned = sahi_karneka_function(user_text)

    # Step 2: Vectorize and get per-tag probabilities
    X = vectorizer.transform([cleaned])
    probs = model.predict_proba(X)

    # Step 3: Apply the threshold to turn probabilities into a binary label matrix
    y_predd = (probs >= threshold).astype(int)

    # Step 4: Decode the binary matrix back into tag names
    predicted_tags = ml.inverse_transform(y_predd)

    # Step 5: Display results
    st.success("✅ Predicted Tags:")
    if predicted_tags and predicted_tags[0]:
        for tag in predicted_tags[0]:
            st.markdown(f"🔹 **`{tag}`**")
    else:
        st.info("No tags matched the threshold.")


# Step 6: Show a "Clear" button
# Streamlit does not allow assigning to a widget-backed session-state key after the
# widget has been rendered, so the reset is done inside an on_click callback.
def clear_input():
    st.session_state.user_input = ""

st.button("Clear Input", on_click=clear_input)
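# --- How to run (assumed setup) ---
# Save this script as app.py next to the joblib artifacts referenced above
# (model.pkl, vectorizer.pkl, mlb.pkl are placeholder names) and start it with:
#     streamlit run app.py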