After certain shocking findings, I have discovered a discrepancy between the value of x and the predictions; because of this, I will now test my own model on the same data.
a3d47cc
# --- Runtime setup: imports, NLTK corpora, and the lemmatizer -----------------
import re

import gradio as gr
import nltk
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.text import tokenizer_from_json
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# 'wordnet' is required by WordNetLemmatizer.  'stopwords' is downloaded and
# imported but never applied in the current pipeline — TODO confirm whether
# stop-word filtering was intended.
nltk.download('stopwords')
nltk.download('wordnet')

# NOTE(review): named 'stemmer' for historical reasons, but it is a lemmatizer.
stemmer = WordNetLemmatizer()

# Load our models (the metric functions below are needed as custom_objects).
def recall(y_true, y_pred):
    """Batch recall: TP / (TP + FN), with K.epsilon() guarding against /0."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_positives + K.epsilon())
def precision(y_true, y_pred):
    """Batch precision: TP / (TP + FP), with K.epsilon() guarding against /0."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_positives + K.epsilon())
def f1(y_true, y_pred):
    """Harmonic mean of precision and recall (F1 score).

    Adds K.epsilon() to the denominator so the metric yields ~0 instead of
    NaN when both precision and recall are zero (e.g. an all-negative batch);
    the original divided by a bare ``p + r`` and could produce NaN.
    """
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
def accuracy(y_true, y_pred):
    """Per-sample binary accuracy: mean element-wise match along axis 1."""
    matches = K.equal(y_true, K.round(y_pred))
    return K.mean(matches, axis=1)
# Restore the fitted tokenizer and the trained RNN.  The custom metric
# functions must be passed to load_model so Keras can deserialize the
# compiled model that referenced them at training time.
with open('tokenizer.json', 'r', encoding='utf-8') as fh:
    tokenizer_config = fh.read()
tokenizer = tokenizer_from_json(tokenizer_config)

model = tf.keras.models.load_model(
    "model_RNN.h5",
    custom_objects={'f1': f1, 'recall': recall, 'precision': precision, 'accuracy': accuracy},
)
def get_sentiment(text):
    """Preprocess *text* like the training data and return the model's score.

    Pipeline: strip mentions/URLs/non-alphanumerics -> drop isolated single
    letters -> collapse whitespace -> lowercase -> lemmatize -> tokenize ->
    truncate/pad to 30 ids -> predict.  Returns a single float (presumably a
    sentiment probability in [0, 1] — depends on the model's output layer).
    """
    max_len = 30  # sequence length the RNN was trained on

    # Remove @mentions, URLs, and non-alphanumeric characters.  The original
    # pattern had a third alternative ``http?:\S`` which was broken
    # (``http?`` matches "htt" with an optional "p") and redundant next to
    # ``https?:\S+``, so it is dropped.
    cleaned = re.sub(r"@\S+|https?:\S+|[^A-Za-z0-9]+", ' ', str(text))
    # Drop isolated single letters left over from the step above.
    cleaned = re.sub(r'\s+[a-zA-Z]\s+', ' ', cleaned)
    cleaned = re.sub(r'\s+', ' ', cleaned, flags=re.I)
    cleaned = cleaned.lower()

    lemmatized = ' '.join(stemmer.lemmatize(word) for word in cleaned.split())

    # Truncate *and* pad to exactly max_len.  The original only padded, so a
    # text producing more than 30 tokens made reshape(-1, 30) raise.
    seq = tokenizer.texts_to_sequences([lemmatized])[0][:max_len]
    seq += [0] * (max_len - len(seq))

    batch = np.array(seq).reshape(-1, max_len)
    return model.predict(batch).tolist()[0][0]
# Minimal Gradio UI: free-text input, the raw model score echoed as text.
app_label = 'Kotsko Kyrylo IPS-42 Sentiment140'
interface = gr.Interface(
    fn=get_sentiment,
    inputs='text',
    outputs='text',
    title=app_label,
    description=app_label,
)
interface.launch(share=False)