# (Hugging Face Spaces page residue removed — status: Running; file size: 1,573 bytes)
import gradio as gr
import tensorflow as tf
import numpy as np

# 1. Load the TFLite model (make sure the filename matches the file you uploaded).
# If your model file has a different name, change the path below.
#
# `interpreter` starts as None so that predict_sentiment can report a clean
# error instead of raising NameError when loading fails.
interpreter = None
try:
    interpreter = tf.lite.Interpreter(model_path="tiny_sentiment_model_imdb.tflite")
    interpreter.allocate_tensors()
except Exception as e:
    # Best-effort startup: log and continue so the UI still comes up.
    print(f"Error loading model: {e}")
def predict_sentiment(text):
    """Run TFLite sentiment inference on *text*.

    Returns "Positive" or "Negative" based on the model's score, or an
    error string when the interpreter is unavailable or inference fails.

    NOTE(review): no tokenizer is wired in yet — the input tensor is never
    set (set_tensor is commented out below), so a real tokenization step
    must be added before predictions are meaningful.
    """
    try:
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        # Placeholder: convert `text` into the model's input tensor here, e.g.:
        # interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])

        # Example output handling (adjust to your model's actual output);
        # assumes a single sigmoid score at output_data[0][0].
        prediction = output_data[0][0]
        return "Positive" if prediction > 0.5 else "Negative"
    except Exception as e:
        # Broad catch so the Gradio UI shows the failure message
        # instead of crashing the request.
        return f"Error saat prediksi: {str(e)}"
# 2. Gradio UI (flagging_mode is the replacement for the removed
#    allow_flagging="never" argument).
_input_box = gr.Textbox(label="Masukkan Kalimat", placeholder="Ketik di sini...")
_output_box = gr.Textbox(label="Hasil Analisis")

demo = gr.Interface(
    fn=predict_sentiment,
    inputs=_input_box,
    outputs=_output_box,
    title="Sentimen Analisis TFLite",
    flagging_mode="never",
)

if __name__ == "__main__":
    demo.launch()