Spaces:
Sleeping
Sleeping
from flask import Flask, render_template, request, jsonify, Response
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import load_model
import json
import tensorflow as tf

# Fix RNG seeds so model predictions / text generation are reproducible
# across runs.
np.random.seed(42)
tf.random.set_seed(42)

# Pre-trained next-word generation models, loaded once at startup and
# shared by the generate() view via predict().
lstm = load_model("model/model_lstm.h5")
bilstm = load_model("model/model_bilstm.h5")
gru = load_model("model/model_gru.h5")

app = Flask(__name__)
# NOTE(review): no @app.route decorator was present, so this view was never
# registered with Flask and the app served 404s; '/' is the assumed path for
# the landing page — confirm against the site's links/templates.
@app.route('/')
def index():
    """Render the landing page (index.html)."""
    return render_template('index.html')
# NOTE(review): no @app.route decorator was present, so this view was never
# registered with Flask; '/about' is the assumed path — confirm.
@app.route('/about')
def about():
    """Render the about page (about.html)."""
    return render_template('about.html')
# NOTE(review): no @app.route decorator was present, so this view was never
# registered with Flask; '/test' is assumed from the template name — confirm.
@app.route('/test')
def ml():
    """Render the model test page (test.html)."""
    return render_template('test.html')
| #option 1 | |
# NOTE(review): no @app.route decorator was present, so this view was never
# registered with Flask; '/classify' (GET for the empty form, POST for a
# submission) is assumed — confirm against the templates.
@app.route('/classify', methods=['GET', 'POST'])
def classify():
    """Classify submitted text as Positive/Negative with the saved model.

    Reads the 'text' form field, runs the binary classifier at threshold
    0.5, and renders classify.html with the prediction label, the original
    text, and an inline SVG status icon (check / X / question mark).
    """
    # Inline SVG icons displayed next to the prediction result.
    photo_x = "<svg xmlns='http://www.w3.org/2000/svg' fill='currentColor' class='bi bi-x larger-icon x' viewBox='0 0 16 16'><path d='M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z'/></svg>"
    # Bug fix: original string was missing the space between the xmlns
    # attribute and fill=, producing invalid SVG markup.
    photo_question_mark = "<svg xmlns='http://www.w3.org/2000/svg' fill='currentColor' class='bi bi-question-circle larger-icon questions' viewBox='0 0 16 16'><path d='M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z'/><path d='M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z'/></svg>"
    photo_check = "<svg xmlns='http://www.w3.org/2000/svg' fill='currentColor' class='bi bi-check larger-icon check' viewBox='0 0 16 16'><path d='M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z'/></svg>"
    try:
        text_ = request.form.get('text')
        # TODO(perf): the model is reloaded from disk on every request;
        # consider loading it once at startup like the generation models.
        loaded_model = load_model("model/model_classification.h5")
    except Exception:
        return render_template('classify.html', prediction_text="None", text="None", prediction_photo=photo_question_mark)
    if text_ is None:
        # GET request (or missing form field): render the empty page instead
        # of letting the tokenizer raise on a None input.
        return render_template('classify.html', prediction_text="None", text="None", prediction_photo=photo_question_mark)
    try:
        # NOTE(review): the tokenizer is fitted on the input text itself, so
        # word indices cannot match the vocabulary the classifier was trained
        # on — the training tokenizer should be saved and loaded instead.
        # Preserved as-is because the training tokenizer is not available here.
        tokenizer = Tokenizer(num_words=100, oov_token="<OOV>")
        tokenizer.fit_on_texts([text_])
        new_sequences = tokenizer.texts_to_sequences([text_])
        new_padded = pad_sequences(new_sequences, maxlen=100, padding="post", truncating="post")
        predictions = loaded_model.predict(new_padded)
        # Binarize sigmoid outputs at the 0.5 decision threshold.
        predict_labels = [1 if value > 0.5 else 0 for value in predictions]
        res = "Positive" if predict_labels[0] == 1 else "Negative"
        icon = photo_check if predict_labels[0] == 1 else photo_x
        return render_template('classify.html', prediction_text=res, text=text_, prediction_photo=icon)
    except ValueError:
        return render_template('classify.html', prediction_text='INVALID INPUT', text=text_, prediction_photo=photo_question_mark)
    except Exception:
        return render_template('classify.html', prediction_text="None", text="None", prediction_photo=photo_question_mark)
# NOTE(review): no @app.route decorator was present, so this view was never
# registered with Flask; '/generate' (GET + POST) is assumed — confirm.
@app.route('/generate', methods=['GET', 'POST'])
def generate():
    """Generate 5-word continuations with the LSTM, BiLSTM and GRU models.

    Reads the 'input' form field and renders generate.html with one
    continuation per model; on any model failure, renders an error string
    in all three slots instead of crashing.
    """
    text_ = request.form.get('input')
    if text_ is None:
        # GET request (or missing form field): render the empty form.
        return render_template('generate.html', res_lstm="", res_bilstm="", res_gru="")
    try:
        txt_lstm = predict(text_, 5, lstm)
        txt_bilstm = predict(text_, 5, bilstm)
        txt_gru = predict(text_, 5, gru)
        return render_template('generate.html', res_lstm=txt_lstm, res_bilstm=txt_bilstm, res_gru=txt_gru)
    except Exception:
        return render_template('generate.html', res_lstm="Something went wrong", res_bilstm="Something went wrong", res_gru="Something went wrong")
def predict(seed_text, next_words, model):
    """Append `next_words` model-predicted words to `seed_text`.

    Loads the training-time word index (static/dictionary.json) and the
    saved tokenizer (static/tokenizer.json), then repeatedly tokenizes the
    growing seed text, pads it to the model's input length, and appends the
    argmax-probability vocabulary word. Returns the extended string; if the
    predicted index is not in the vocabulary, appends an empty word.
    """
    with open("static/dictionary.json", "r") as file:
        loaded_dictionary = json.load(file)
    with open('static/tokenizer.json', 'r') as json_file:
        tokenizer_json = json_file.read()
    tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(tokenizer_json)
    # Use the training-time vocabulary for text->index mapping. Hoisted out
    # of the loop: the original reassigned this loop-invariant every pass.
    tokenizer.word_index = loaded_dictionary
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        # maxlen=51-1 presumably matches the training window (51-word
        # sequences minus the target word) — TODO confirm against training.
        token_list = pad_sequences([token_list], maxlen=51 - 1, padding='pre')
        # Take the argmax class as a plain int scalar (model.predict returns
        # a (1, vocab) array).
        predicted = int(np.argmax(model.predict(token_list), axis=-1)[0])
        output_word = ""
        # Reverse lookup index -> word; break on the first hit (indices in a
        # word_index mapping are unique).
        for word, index in tokenizer.word_index.items():
            if index == predicted:
                output_word = word
                break
        seed_text += " " + output_word
    return seed_text
if __name__ == '__main__':
    # Development server only: debug=True enables auto-reload and the
    # interactive debugger and must not be used in production.
    app.run(debug=True)