Spaces:
Sleeping
Sleeping
File size: 1,632 Bytes
1eaee2c 5393fcc a2ad275 1eaee2c a2ad275 1eaee2c 9c98a5a 73d289c a2ad275 e3629ce 73d289c a2ad275 1053c74 73d289c a2ad275 e3629ce a2ad275 1eaee2c a647fb1 1eaee2c e3629ce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification
from huggingface_hub import snapshot_download
import joblib
import os
import os
from transformers import TFDistilBertForSequenceClassification
# --- Startup diagnostics: report the working directory and check whether a
# locally bundled copy of the model exists before falling back to the Hub.
print("Current working directory:", os.getcwd())
print(
    "Checking model path:",
    os.listdir("nlp_intent/intent_model")
    if os.path.exists("nlp_intent/intent_model")
    else "not found",
)
try:
    # Smoke-test loading the local model directory. This is diagnostic only:
    # the module re-loads the model from a Hub snapshot further down.
    model = TFDistilBertForSequenceClassification.from_pretrained("nlp_intent/intent_model")
    print("Model loaded successfully!")
except Exception as e:
    # Best-effort: log the failure and continue rather than crashing startup.
    print("Model load failed:", e)
# Fetch the complete model repository from the Hugging Face Hub (cached
# locally after the first call) and resolve the artifact paths inside it.
repo_path = snapshot_download(
    repo_id="samithcs/nlp_intent_model",
    repo_type="model",
)

_assets_dir = os.path.join(repo_path, "nlp_intent")
MODEL_PATH = os.path.join(_assets_dir, "intent_model")
TOKENIZER_PATH = os.path.join(_assets_dir, "intent_tokenizer")
LABEL_PATH = os.path.join(_assets_dir, "label_encoder.joblib")

# Load the classifier weights, its tokenizer, and the label encoder that
# maps predicted class indices back to intent names.
model = TFDistilBertForSequenceClassification.from_pretrained(MODEL_PATH)
tokenizer = DistilBertTokenizer.from_pretrained(TOKENIZER_PATH)
label_encoder = joblib.load(LABEL_PATH)
print("Model loaded successfully!")
def predict_intent(text: str) -> dict:
    """Classify a user utterance into an intent with a confidence score.

    Args:
        text: Raw input text; tokenized with truncation/padding to 128 tokens.

    Returns:
        dict with keys ``"intent"`` (label name decoded by the module-level
        ``label_encoder``) and ``"confidence"`` (softmax probability of the
        predicted class, as a float).
    """
    inputs = tokenizer(
        text, return_tensors="tf", truncation=True, padding=True, max_length=128
    )
    outputs = model(inputs)
    # Compute the class distribution once and reuse it for both the arg-max
    # prediction and its confidence. Softmax is monotonic, so argmax over the
    # probabilities selects the same class as argmax over the raw logits.
    probs = tf.nn.softmax(outputs.logits, axis=-1)[0]
    predicted_class = int(tf.argmax(probs).numpy())
    intent = label_encoder.inverse_transform([predicted_class])[0]
    confidence = float(probs[predicted_class].numpy())
    return {"intent": intent, "confidence": confidence}