from google.colab import drive
drive.mount('/content/drive')
import nltk
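# Download the NLTK resources used below: 'punkt' for word_tokenize and
# 'wordnet' for WordNetLemmatizer. (Recent NLTK releases may also require
# nltk.download('punkt_tab') for tokenization.)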
nltk.download('punkt')
nltk.download('wordnet')
import json
import random
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from nltk.stem import WordNetLemmatizer
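# Load the intents dataset. Assumed structure (inferred from how the fields
# are used below):
# {"intents": [{"tag": "...", "patterns": ["..."], "responses": ["..."]}, ...]}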
file_path = '/content/drive/MyDrive/Colab_Notebooks/Dataset/intents.json'
with open(file_path, 'r') as file:
    data = json.load(file)
lemmatizer = WordNetLemmatizer()
words = []
classes = []
documents = []
ignore_words = ['?', '!', '.']
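# Build the vocabulary and the training documents: each pattern is tokenized,
# its tokens are added to the global word list, and the (tokens, tag) pair is
# stored so it can be vectorized later.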
for intent in data['intents']:
    for pattern in intent['patterns']:
        # Tokenize each word
        word_list = nltk.word_tokenize(pattern)
        words.extend(word_list)
        documents.append((word_list, intent['tag']))
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))
classes = sorted(list(set(classes)))
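# Turn each document into a training sample: a bag-of-words vector over the
# vocabulary, paired with a one-hot vector marking the intent class.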
training = []
output_empty = [0] * len(classes)
for doc in documents:
    bag = []
    word_patterns = [lemmatizer.lemmatize(word.lower()) for word in doc[0]]
    for w in words:
        bag.append(1 if w in word_patterns else 0)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)
train_x = np.array(list(training[:, 0]))
train_y = np.array(list(training[:, 1]))
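# A small feed-forward classifier: two ReLU hidden layers with dropout for
# regularization, and a softmax output over all intent classes.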
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
# Note: the `decay` argument was removed from the default Keras SGD in newer
# TensorFlow releases; drop it (or use tf.keras.optimizers.legacy.SGD) there.
sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
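# Train on the full pattern set; typical intents files are small, so 200
# epochs with batch_size=5 finishes quickly.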
hist = model.fit(train_x, train_y, epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')  # fit() returns the History object; it is not passed to save()
print("Model created and saved successfully!")
import tensorflow as tf
model = tf.keras.models.load_model('chatbot_model.h5')
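# Apply the same preprocessing used at training time: tokenize, lowercase,
# and lemmatize.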
def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words
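# Encode a sentence as a fixed-length 0/1 vector over the training vocabulary.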
def bag_of_words(sentence, words):
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return np.array(bag)
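# Predict intent probabilities for a sentence and return those above a
# confidence threshold, sorted with the most likely intent first.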
def predict_class(sentence, model):
    bow = bag_of_words(sentence, words)
    res = model.predict(np.array([bow]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = [{"intent": classes[r[0]], "probability": str(r[1])} for r in results]
    return return_list
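# Look up the predicted tag in the intents file and pick a random canned
# response for it.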
def get_response(intents_list, intents_json):
    tag = intents_list[0]['intent']
    for i in intents_json['intents']:
        if i['tag'] == tag:
            return random.choice(i['responses'])
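# Minimal interactive chat loop.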
print("Bot is ready to chat! Type 'quit' to stop.")
while True:
    message = input("You: ")
    if message.lower() == "quit":
        break
    ints = predict_class(message, model)
    if ints:
        res = get_response(ints, data)
        print("Bot:", res)
    else:
        print("Bot: Sorry, I don't understand that.")