|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| import random
|
| import json
|
| import pickle
|
| import numpy as np
|
|
|
| import nltk
|
| from nltk.stem import WordNetLemmatizer
|
|
|
| from tensorflow.keras.models import Sequential
|
| from tensorflow.keras.layers import Dense, Activation, Dropout
|
| from tensorflow.keras.optimizers import SGD
|
| from keras.models import model_from_json
|
|
|
# Lemmatizer reduces inflected words to a base form ("running" -> "run") so
# different surface forms of a word share one vocabulary entry.
lemmatizer = WordNetLemmatizer()

# Load the intent definitions.  A context manager closes the file handle
# deterministically (the original open(...).read() leaked it), and JSON is
# UTF-8 by specification, so decode explicitly rather than relying on the
# locale default.
with open('Categories.json', encoding='utf-8') as intents_file:
    intents = json.load(intents_file)

words = []       # full vocabulary collected from every pattern
classes = []     # unique intent tags, in first-seen order (sorted later)
documents = []   # (token_list, tag) pairs used to build training rows
ignoreCharacters = [',', '.', '!', '?']  # punctuation tokens to drop
|
|
|
# Tokenize every example pattern, remembering which intent tag produced it.
# The same tag is recorded at most once in `classes`.
for intent in intents['categories']:
    tag = intent['tag']
    for pattern in intent['patterns']:
        tokens = nltk.word_tokenize(pattern)
        words.extend(tokens)
        documents.append((tokens, tag))
        if tag not in classes:
            classes.append(tag)
|
|
|
# Lemmatize the vocabulary, drop punctuation tokens, then deduplicate and
# sort so word order (and therefore bag-of-words column order) is stable.
# NOTE(review): vocabulary words are NOT lowercased here, while the
# per-document tokens later are lemmatized from word.lower(); a pattern word
# differing only by case from its vocabulary entry will never match in the
# bag.  Left unchanged to preserve the trained artifacts -- confirm whether
# lowercasing was intended.
words = [lemmatizer.lemmatize(word) for word in words if word not in ignoreCharacters]
words = sorted(set(words))

classes = sorted(set(classes))

# Persist vocabulary and label list for the inference script.  Context
# managers flush and close the handles (the bare open(...) calls leaked
# them and left the write timing to the GC).
with open('words.pkl', 'wb') as words_file:
    pickle.dump(words, words_file)
with open('classes.pkl', 'wb') as classes_file:
    pickle.dump(classes, classes_file)
|
|
|
# Build the training rows: for each tokenized pattern, a binary bag-of-words
# vector over the vocabulary plus a one-hot vector over the classes.
training = []
outputEmpty = [0] * len(classes)  # one-hot template, copied per document

for document in documents:
    # Lemmatize the document's tokens (lowercased) into a set so the
    # per-vocabulary-word membership test below is O(1) instead of a list
    # scan (the original was O(len(words) * len(tokens)) per document).
    wordPatterns = {lemmatizer.lemmatize(word.lower()) for word in document[0]}
    # Comprehension replaces the original side-effecting conditional
    # expression `bag.append(1) if ... else bag.append(0)`.
    bag = [1 if word in wordPatterns else 0 for word in words]

    outputRow = list(outputEmpty)
    outputRow[classes.index(document[1])] = 1
    training.append([bag, outputRow])
|
|
|
# Shuffle so mini-batches are not grouped by intent.
random.shuffle(training)
# BUG FIX: each row is a [bag, outputRow] pair whose two elements have
# different lengths, so the result is a ragged array.  NumPy >= 1.24
# refuses to build one implicitly ("setting an array element with a
# sequence"); dtype=object must be requested explicitly.
training = np.array(training, dtype=object)

train_x = list(training[:, 0])  # bag-of-words input vectors
train_y = list(training[:, 1])  # one-hot label vectors
|
|
|
# Feed-forward intent classifier: two ReLU hidden layers with dropout for
# regularization, softmax output sized to the number of intent classes.
model = Sequential([
    Dense(128, input_shape=(len(train_x[0]),), activation='relu'),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(len(train_y[0]), activation='softmax'),
])

# Plain SGD with Nesterov momentum and a small learning-rate decay.
sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.5, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
|
|
|
# Train the classifier and persist everything the chat client needs.
history = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)

# BUG FIX: Model.save()'s second positional parameter is `overwrite`, not a
# place to record the History object -- passing `history` there only
# "worked" because a History instance is truthy.  Save the model alone;
# `history` stays available in memory if training metrics are needed.
model.save('chatbot.h5')

# Merge-conflict resolution: kept the "Updated upstream" side, which also
# exports the architecture as JSON plus a separate weights file; both sides
# ended with the same completion message, so nothing from "Stashed changes"
# is lost.
json_model = model.to_json()
with open('config.json', 'w') as json_file:
    json_file.write(json_model)
model.save_weights('weights.h5')

print('Training Completed')
|
|
|