import tensorflow as tf
import gradio as gr
import pandas as pd
import pickle

from keras.utils import pad_sequences

# Maximum token-sequence length the model expects; inputs are padded/truncated to this.
MAX_LEN = 200

# Restore the tokenizer (vocabulary mapping) fitted at training time.
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

model = tf.keras.models.load_model('toxic.h5')

# One label per model output, in output order.
# FIX: the original list had "obscene threat" as a single element (missing
# comma), so only five labels were reported and outputs after "obscene" were
# mislabelled. The standard Jigsaw toxic-comment label set is the six below —
# confirm against the model's output dimension.
LABELS = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]


def score_comment(comment):
    """Score a comment against each toxicity label.

    Args:
        comment: Raw comment text to classify.

    Returns:
        One line per label, formatted "label: True/False", where True means
        the model's score for that label exceeded the 0.5 threshold.
    """
    sequences = tokenizer.texts_to_sequences([comment])
    # 'padded' (not 'input') — avoid shadowing the builtin.
    padded = pad_sequences(sequences, maxlen=MAX_LEN)
    results = model.predict(padded)
    # zip also stays safe if the model emits fewer outputs than len(LABELS).
    return "".join(
        f"{label}: {score > 0.5}\n" for label, score in zip(LABELS, results[0])
    )


interface = gr.Interface(
    fn=score_comment,
    # gr.inputs.* was deprecated and removed in Gradio 3.x; use gr.Textbox directly.
    inputs=gr.Textbox(lines=2, placeholder='Comment to score'),
    outputs='text',
)

if __name__ == "__main__":
    interface.launch()