# Provenance (from the Hugging Face Space page this file was copied from):
# author: gusdelact — "Update app.py" — commit acbf5d8 (verified)
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import pickle
# Artifacts produced at training time: the trained QA model and the fitted
# tokenizer (word <-> index mapping) it was trained with.
filename = "chatbot_120_epochs.h5"
model = tf.keras.models.load_model(filename)

# NOTE(review): pickle.load executes arbitrary code from the file — only load
# tokenizer.sav from a trusted source.
# Use a context manager so the file handle is closed (original leaked it).
with open("tokenizer.sav", "rb") as f:
    tokenizer = pickle.load(f)

# Fixed sequence lengths the model was trained with; inputs are padded or
# truncated to these lengths before prediction.
max_story_len = 156
max_question_len = 6
def predict_answer(story, question):
    """Predict a one-word answer for *question* given *story*.

    Parameters
    ----------
    story : str
        Whitespace-separated context passage.
    question : str
        Whitespace-separated question about the story.

    Returns
    -------
    str
        The vocabulary word whose index has the highest predicted
        probability, or "" when the argmax index is not in the tokenizer
        vocabulary (e.g. the padding index 0).
    """
    # Tokenize into integer index sequences. Passing a list of token lists
    # matches how the original called texts_to_sequences.
    story_seq = tokenizer.texts_to_sequences([story.split()])
    question_seq = tokenizer.texts_to_sequences([question.split()])

    # Pad/truncate to the fixed lengths the model was trained with.
    story_padded = pad_sequences(story_seq, maxlen=max_story_len)
    question_padded = pad_sequences(question_seq, maxlen=max_question_len)

    # The model outputs a probability distribution over the vocabulary;
    # take the most likely index for the first (only) sample.
    pred_results = model.predict([story_padded, question_padded])
    val_max = int(np.argmax(pred_results[0]))

    # O(1) reverse lookup via Tokenizer.index_word instead of the original
    # O(vocab) linear scan over word_index; .get keeps the "" fallback for
    # indices with no word (e.g. padding index 0).
    return tokenizer.index_word.get(val_max, "")
import gradio as gr

# Wire the prediction function into a simple web UI: two free-text inputs
# (story, question) mapped onto predict_answer, one text output.
demo = gr.Interface(
    fn=predict_answer,
    inputs=["text", "text"],
    outputs="text",
    live=False,
    title="Question Answering Chatbot",
    description="Enter a story and a question to get an answer from the model.",
)

# Start the app with verbose error reporting enabled.
demo.launch(debug=True)