# (Removed: extraction residue from the hosting site's file viewer —
#  space status, file size, git blob hashes, and a line-number gutter.
#  None of it was part of the program.)
# ================================
# ENVIRONMENT (MUST BE FIRST)
# ================================
import os
# These must be set BEFORE tensorflow/keras are imported below; both values
# are read at import time, so setting them later has no effect.
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # 2 = suppress TF INFO and WARNING logs
# ================================
# IMPORTS
# ================================
import gradio as gr
import tensorflow as tf
import numpy as np
import pickle
from PIL import Image
# ================================
# CONSTANTS (MUST MATCH TRAINING)
# ================================
IMG_SIZE = (128, 128)  # (width, height) passed to PIL.Image.resize; must match training
MAX_LEN = 50  # token-sequence length used by pad_sequences; must match training
# Order matters: predict_ticket maps probs[0..3] to these names in this order.
# Presumably this is also the model's output-unit order — confirm against training.
LABELS = ["Critical", "High", "Medium", "Low"]
# ================================
# LOAD MODEL (KERAS 3 SAFE)
# ================================
# Load the trained fusion model for inference only. compile=False skips
# restoring optimizer/loss state, which both speeds loading and avoids
# Keras 3 deserialization issues for training-only objects.
model = tf.keras.models.load_model(
    "fusion_model_keras3.keras",
    compile=False,
)
# FIX: the original print() contained a mojibake emoji that split the string
# literal across two lines — a syntax error. Replaced with plain text.
print("Fusion model loaded")
# ================================
# LOAD TOKENIZER
# ================================
# Load the Keras tokenizer fitted at training time.
# NOTE(security): pickle.load executes arbitrary code from the file; this is
# acceptable only because tokenizer.pkl ships with the app — never load an
# untrusted pickle here.
with open("tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)
# FIX: the original print() contained a mojibake emoji that split the string
# literal across two lines — a syntax error. Replaced with plain text.
print("Tokenizer loaded")
# ================================
# IMAGE PREPROCESS
# ================================
def preprocess_image(image: Image.Image):
    """Convert a PIL image into a normalized float32 batch of one.

    Returns an array of shape (1, H, W, 3) — H and W from IMG_SIZE —
    with pixel values scaled into [0, 1].
    """
    rgb = image.convert("RGB").resize(IMG_SIZE)
    pixels = np.asarray(rgb, dtype=np.float32) / 255.0
    return pixels[np.newaxis, ...]
# ================================
# TEXT PREPROCESS
# ================================
def preprocess_text(text: str):
    """Tokenize *text* and pad/truncate it to MAX_LEN timesteps.

    A missing (None) description is treated as an empty string so the
    model always receives a valid (1, MAX_LEN) integer sequence.
    """
    if text is None:
        text = ""
    sequences = tokenizer.texts_to_sequences([text])
    return tf.keras.preprocessing.sequence.pad_sequences(sequences, maxlen=MAX_LEN)
# ================================
# PREDICTION FUNCTION
# ================================
def predict_ticket(image, text):
    """Run the fusion model and return a severity→probability mapping.

    Without an image the model cannot run, so every class scores 0.0;
    Gradio's Label component renders the dict directly.
    """
    if image is None:
        return {"Critical": 0.0, "High": 0.0, "Medium": 0.0, "Low": 0.0}

    image_batch = preprocess_image(image)
    text_batch = preprocess_text(text)
    scores = model.predict([image_batch, text_batch], verbose=0)[0]

    return {
        "Critical": float(scores[0]),
        "High": float(scores[1]),
        "Medium": float(scores[2]),
        "Low": float(scores[3]),
    }
# ================================
# GRADIO UI
# ================================
# Gradio UI: an image upload plus a free-text description feed the fusion
# model; the Label output shows all four class probabilities.
# FIX: removed the stray trailing " |" after launch() (a syntax error) and
# replaced mojibake emoji in the labels/title with plain text.
interface = gr.Interface(
    fn=predict_ticket,
    inputs=[
        gr.Image(type="pil", label="Upload Ticket Screenshot"),
        gr.Textbox(
            lines=4,
            placeholder="Describe the issue (recommended)",
            label="Ticket Description",
        ),
    ],
    outputs=gr.Label(num_top_classes=4, label="Predicted Severity"),
    title="Ticket Severity Classification",
    description=(
        "CNN + NLP **Fusion Model** for ticket urgency detection.\n\n"
        "**Classes:** Critical | High | Medium | Low"
    ),
)

# Guarded so importing this module doesn't start a server; running it as a
# script (as Spaces does with `python app.py`) still launches the UI.
if __name__ == "__main__":
    interface.launch()