Deleted file: chatbot_kel.py (34 lines removed). Reconstructed content of the deleted file follows.
|
@@ -1,34 +0,0 @@
|
|
| 1 |
-
import numpy as np
|
| 2 |
-
from keras.saving import load_model
|
| 3 |
-
from keras.preprocessing.text import Tokenizer
|
| 4 |
-
from keras_self_attention import SeqSelfAttention
|
| 5 |
-
from model_settings_kel import *
|
| 6 |
-
import json
|
| 7 |
-
from tokenizer import *
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
# --- Module-level setup: load data and the trained model once at import time. ---
# dataset_file / responses_file presumably come from the `model_settings_kel`
# star import — TODO(review): confirm.

# Dataset is a JSON object; its keys are the training phrases fed to the tokenizer.
with open(dataset_file, "r") as f:
    dset = json.load(f)

# One candidate bot response per line; only trailing newlines are stripped.
with open(responses_file, "r") as f:
    lines = [x.rstrip("\n") for x in f.readlines()]

# Build the tokenizer vocabulary from the dataset's keys.
# NOTE(review): fit_on_texts arrives via a star import (tokenizer or
# model_settings_kel) — confirm which module owns it.
fit_on_texts(list(dset.keys()))

# SeqSelfAttention is a custom layer, so Keras must be told how to
# deserialize it via custom_objects.
model = load_model("chatbot_kel.keras", custom_objects={"SeqSelfAttention": SeqSelfAttention})
|
| 19 |
-
|
| 20 |
-
def find_line_number(array):
    """Return the index of the largest value in *array* (argmax).

    Ties resolve to the earliest index — identical to the previous
    stable-descending-sort implementation. Raises ValueError if *array*
    is empty.
    """
    # Single O(n) scan instead of materializing and sorting every
    # (value, index) pair (which was O(n log n)).
    return max(range(len(array)), key=lambda i: array[i])
|
| 22 |
-
|
| 23 |
-
def generate(text, verbose=1):
    """Return the bot's response line for the user input *text*.

    The input is tokenized, padded/clipped to the model's fixed input
    length, scored by the model, and the highest-scoring response line
    is returned. *verbose* is forwarded to ``model.predict``.
    """
    # Lowercase, then tokenize into integer ids (roughly one per word).
    ids = list(tokenize(text.lower()))
    # Zero-pad, then clip so the model always sees exactly inp_len tokens.
    ids = (ids + [0] * inp_len)[:inp_len]
    # Batch of one; take the single prediction vector back out.
    scores = model.predict(np.array([ids]), verbose=verbose)[0]
    # The best-scoring position indexes directly into the response lines.
    return lines[find_line_number(scores)]
|
| 29 |
-
|
| 30 |
-
if __name__ == "__main__":
    # Interactive chat loop — only runs when executed as a script,
    # never when this module is imported.
    while True:
        reply = generate(input("User: "))
        # "<null>" is the model's sentinel for "no response"; stay silent then.
        if reply != "<null>":
            print(f"Bot: {reply}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|