File size: 884 Bytes
bde20b1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
349d2e4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
import tensorflow as tf
import gradio as gr
import pandas as pd
import pickle
from keras.utils import pad_sequences

# Maximum token-sequence length; comments are padded/truncated to this in
# score_comment() below, so it must match what the model was trained with.
max_len = 200

# Restore the fitted Keras tokenizer (word -> integer-index vocabulary).
# NOTE(review): pickle.load executes arbitrary code during deserialization —
# only load tokenizer.pickle from a trusted source.
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
# Load the pre-trained toxicity classifier; fed padded integer sequences below.
model = tf.keras.models.load_model('toxic.h5')

arr=["toxic","severe_toxic","obscene threat","insult","identity_hate"]
def score_comment(comment):
    """Score a comment against each toxicity label.

    Args:
        comment: Raw comment text to classify.

    Returns:
        A newline-terminated string with one ``label: True/False`` line per
        label in ``arr``; True means the model's sigmoid score for that
        label exceeded 0.5.
    """
    # Tokenize, then pad/truncate to the fixed length the model expects.
    sequences = tokenizer.texts_to_sequences([comment])
    padded = pad_sequences(sequences, maxlen=max_len)  # renamed: `input` shadowed the builtin
    results = model.predict(padded)
    # zip pairs each label with its prediction column and stops at the shorter
    # of the two, so a label/output count mismatch can never raise IndexError.
    return ''.join(f'{label}: {score > 0.5}\n' for label, score in zip(arr, results[0]))

# Build and launch the Gradio UI for interactive scoring.
# Fix: the gr.inputs.* namespace was deprecated in Gradio 3.0 and removed in
# 4.0 — input components are now referenced directly (gr.Textbox).
interface = gr.Interface(fn=score_comment,
                         inputs=gr.Textbox(lines=2, placeholder='Comment to score'),
                         outputs='text')
interface.launch()