Finish the generate function and import from the model_settings.py file that I'm going to create
Browse files
test.py
CHANGED
|
@@ -3,6 +3,8 @@ import numpy as np
|
|
| 3 |
from keras.saving import load_model
|
| 4 |
from keras_self_attention import SeqSelfAttention
|
| 5 |
from vecs import *
|
|
|
|
|
|
|
| 6 |
|
| 7 |
with open("dataset.json", "r") as f:
|
| 8 |
dset = json.load(f)
|
|
@@ -17,7 +19,10 @@ def find_line_number(array):
|
|
| 17 |
|
| 18 |
def generate(text):
|
| 19 |
tokens = list(tokenizer.texts_to_sequences([text,])[0]) # text into tokens (almost words)
|
| 20 |
-
tokens =
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
if __name__ == "__main__": # if this code is not being imported, open the chat
|
| 23 |
while True:
|
|
|
|
| 3 |
from keras.saving import load_model
|
| 4 |
from keras_self_attention import SeqSelfAttention
|
| 5 |
from vecs import *
|
| 6 |
+
from model_settings import *
|
| 7 |
+
|
| 8 |
|
| 9 |
# Load the response dataset once at import time; generate() indexes into it.
with open("dataset.json", "r") as fh:
    dset = json.load(fh)
|
|
|
|
| 19 |
|
| 20 |
def generate(text):
    """Return the dataset response that best matches *text*.

    The input sentence is tokenized, zero-padded (or truncated) to the
    model's fixed input length, scored by the model, and the dataset
    value at the predicted line index is returned.
    """
    # Tokenizer turns the sentence into integer token ids (roughly one per word).
    token_ids = list(tokenizer.texts_to_sequences([text])[0])
    # Pad with zeros then cut, so the sequence is exactly inp_len long.
    token_ids = (token_ids + [0] * inp_len)[:inp_len]
    # Batch of one; take the single prediction row back out.
    scores = model.predict(np.array([token_ids]))[0]
    # find_line_number maps the prediction vector to a dataset line index.
    best_line = find_line_number(scores)
    return list(dset.values())[best_line]
|
| 26 |
|
| 27 |
if __name__ == "__main__": # if this code is not being imported, open the chat
|
| 28 |
while True:
|