"""Gradio app: classify whether a (text, reason) pair is valuable feedback.

Loads a fine-tuned TF BERT sequence classifier (extracted from model.zip on
first run) and exposes a two-field UI returning a verdict plus a confidence
score.
"""

import os
import zipfile

import gradio as gr
import numpy as np
import tensorflow as tf
from transformers import BertTokenizer, TFBertForSequenceClassification

# Unzip model.zip if not already extracted (first run on a fresh deployment).
if not os.path.exists("model"):
    with zipfile.ZipFile("model.zip", "r") as zip_ref:
        zip_ref.extractall("model")

# Directory holding the fine-tuned model + tokenizer files.
MODEL_PATH = "model"

# Load model and tokenizer once at import time so requests are fast.
model = TFBertForSequenceClassification.from_pretrained(MODEL_PATH)
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)


def predict_value(text, reason, threshold=0.7):
    """Classify a (text, reason) pair as valuable or not.

    Args:
        text: The feedback text to evaluate.
        reason: The accompanying justification; joined to ``text`` with a
            ``[SEP]`` marker so the model sees both segments.
        threshold: Minimum positive-class probability required to label the
            pair as valuable (default 0.7, stricter than argmax).

    Returns:
        A ``(result, confidence)`` tuple of display strings for the UI.
    """
    combined_text = text + " [SEP] " + reason
    encoding = tokenizer(
        combined_text,
        padding="max_length",
        truncation=True,
        max_length=128,
        return_tensors="tf",
    )
    logits = model.predict(dict(encoding)).logits
    probs = tf.nn.softmax(logits, axis=1).numpy()

    # Index the scalar explicitly: comparing the (1,) array `probs[:, 1]`
    # against the threshold relies on ambiguous single-element array
    # truthiness, which NumPy deprecates.
    confidence = float(probs[0, 1])
    prediction = 1 if confidence > threshold else 0

    if prediction == 1:
        result = "✅ Valuable Feedback"
    else:
        result = "❌ Not Valuable Feedback"
    return result, f"Confidence Score: {confidence:.2f}"


# Gradio UI: two text inputs, one button, two text outputs.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🚀 Text & Reason Evaluator
        Analyze if the provided text and reason are valuable!
        """
    )
    with gr.Row():
        text_input = gr.Textbox(label="📝 Enter the Text")
        reason_input = gr.Textbox(label="💡 Enter the Reason")
    predict_button = gr.Button("🔍 Predict")
    output_result = gr.Textbox(label="Result")
    output_confidence = gr.Textbox(label="Confidence Score")
    predict_button.click(
        predict_value,
        inputs=[text_input, reason_input],
        outputs=[output_result, output_confidence],
    )

demo.launch()