ctgadget committed on
Commit
6b4dab8
·
1 Parent(s): 04bf091

adding project app folder, index page, and requirements.txt file.

Browse files
__init__.py ADDED
File without changes
chatbotlib/__init__.py ADDED
File without changes
chatbotlib/chatbot_demo.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ """The chatbot demo app is the "Demo the chatbot page" in the main page"""
4
+
5
+ # the code for the page
6
+ def run_app():
7
+ """
8
+ Application code for the Chatbot demo
9
+ """
10
+ ############################
11
+ # :: IMPORTS AND CONSTANTS #
12
+ ############################
13
+
14
+ import nltk
15
+ from nltk.stem import WordNetLemmatizer
16
+ lemmatizer = WordNetLemmatizer()
17
+
18
+ import pickle
19
+ import numpy as np
20
+ import pandas as pd
21
+
22
+ import time
23
+ import json
24
+ import random
25
+ import datetime
26
+
27
+ import streamlit as st
28
+ from streamlit_chat import message
29
+
30
+ from keras.models import load_model
31
+ chatbot_model = load_model('chatbot_model.h5')
32
+
33
+ # Load commands and preprocessed data
34
+ commands = json.loads(open('commands.json').read())
35
+ words = pickle.load(open('words.pkl', 'rb'))
36
+ classes = pickle.load(open('classes.pkl', 'rb'))
37
+
38
+ ######################################
39
+ # :: HELPER FUNCTIONS FOR PROCESSING #
40
+ ######################################
41
+
42
def clean_up_sentence(sentence):
    """Tokenize *sentence* and lemmatize each token in lowercase.

    Args:
        sentence (str): Raw user input to preprocess.

    Returns:
        list: Lowercased, lemmatized tokens.
    """
    # NOTE: relies on `nltk` and the shared `lemmatizer` from the
    # enclosing run_app() scope.
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(tok.lower()) for tok in tokens]
55
+
56
def bag_of_words(sentence):
    """Convert a sentence into a binary bag-of-words vector.

    Args:
        sentence (str): The input sentence to convert.

    Returns:
        numpy.ndarray: float32 vector of len(words); 1.0 where the
        vocabulary word occurs in the (lemmatized) sentence, else 0.0.
    """
    present = set(clean_up_sentence(sentence))
    vector = np.zeros(len(words), dtype=np.float32)
    for idx, vocab_word in enumerate(words):
        if vocab_word in present:
            vector[idx] = 1
    return vector
71
+
72
def predict_class(sentence):
    """
    Predict the intent of the input sentence with the loaded model.

    Args:
        sentence (str): The input sentence.

    Returns:
        list: Dicts with keys "intent" and "probability" (probability kept
        as a string for display), sorted by descending numeric probability.
    """
    p = bag_of_words(sentence)
    res = chatbot_model.predict(np.array([p]))[0]
    ERROR_THRESHOLD = 0.25

    # Keep only sufficiently confident predictions.
    threshold_indices = np.where(res > ERROR_THRESHOLD)[0]
    # Sort numerically BEFORE stringifying: the original sorted the string
    # form of the probability, which orders lexicographically rather than
    # by value.
    ordered = sorted(threshold_indices, key=lambda i: res[i], reverse=True)
    return [{"intent": classes[i], "probability": str(res[i])} for i in ordered]
91
+
92
def get_random_response(commands_json, tag):
    """
    Pick a random canned response for *tag* from the commands JSON.

    Args:
        commands_json (dict): Parsed commands file with an "intents" list.
        tag (str): The tag associated with the intent.

    Returns:
        str: A randomly chosen response for the tag, or a fallback message
        when the tag is not found.
    """
    for intent in commands_json["intents"]:
        if intent["tag"] == tag:
            return random.choice(intent["responses"])
    return "I'm sorry, I don't understand."
110
+
111
+
112
def chatbot_response(text):
    """
    Generate a chatbot reply for the user input, timing the prediction.

    Args:
        text (str): The user input message.

    Returns:
        tuple: (reply string, elapsed prediction time in seconds).
    """
    t0 = time.time()
    intents = predict_class(text)
    elapsed_time = time.time() - t0

    if not intents:
        return "I'm sorry, I don't understand.", elapsed_time
    # Use the highest-probability intent for the reply.
    return get_random_response(commands, intents[0]['intent']), elapsed_time
132
+
133
def get_text():
    """
    Render the chat text-input box and return its current value.

    Returns:
        str: The user input.
    """
    return st.text_input("You: ", "Hello, how are you?", key="input")
142
+
143
def get_chat_history_df():
    """
    Build a DataFrame of the chat history held in Streamlit session state.

    Returns:
        pd.DataFrame: Chat history with columns 'User Input' and 'Bot Response'.
    """
    # Pair each user input with the corresponding bot reply.
    rows = list(zip(st.session_state['past'],
                    st.session_state['generated_responses']))
    return pd.DataFrame(rows, columns=['User Input', 'Bot Response'])
158
+
159
def export_chat_history(chat_history_df):
    """
    Offer the chat history DataFrame as a downloadable CSV file.

    Args:
        chat_history_df (pd.DataFrame): The DataFrame containing the chat history.

    Returns:
        bool: True if the download button was rendered, False otherwise.
    """
    try:
        # Use '-' (not '/') in the date: '/' is illegal in filenames on most
        # platforms — the original "%m/%d/%Y" produced names like
        # 'chat_history_06/01/2024_...csv'.
        current_datetime = datetime.datetime.now().strftime("%m-%d-%Y_%I_%M_%S_%p")

        # Define the CSV file path
        chat_history_filename = f'chat_history_{current_datetime}.csv'

        # Encode the dataframe as UTF-8 CSV bytes.
        chat_history_csv = chat_history_df.to_csv(index=False).encode("utf-8")

        # Provide a download link for the CSV file
        st.download_button(
            label="Download Chat History",
            data=chat_history_csv,
            file_name=chat_history_filename,
            mime="text/csv",
            help="Download the chat history session to a CSV file."
        )

        return True
    except Exception as e:
        st.error(f"Export failed: {str(e)}")
        return False
192
+
193
def clear_session_state():
    """
    Reset the chat-history session-state keys.

    Empties 'generated_responses' (bot replies) and 'past' (user inputs);
    'execution_times' is intentionally left untouched, as in the original.
    """
    for key in ('generated_responses', 'past'):
        st.session_state[key] = []
204
+
205
###############
# :: MAIN APP #
###############

st.markdown("<h1 style='text-align: left;'>NLP Chatbot Demo 💬</h1>", unsafe_allow_html=True)
st.subheader(
    """
    NLP Chatbot is a conversational chat bot. Begin by entering in a prompt below.
    """
)
# Explanation of the page options, rendered as markdown bullet points.
st.markdown("""
- This chatbot app offers the following options:
    - Option 1: Clear Chat Log
    - Option 2: Preview the Chat History
    - Option 3: Export Chat History

- To manage these options, three columns are created using `st.columns(3)`.
    - Column 1 contains a checkbox labeled 'Clear Chat Log'. Selecting it will clear the chat log. Make sure to leave the text input box empty as well.
    - Column 2 contains a checkbox labeled 'Preview the Chat History'. Selecting it will display the chat history.
    - Column 3 contains a checkbox labeled 'Export Chat History'. Selecting it will export the chat history as a CSV file.

- Please interact with the checkboxes to perform the desired actions.
""")
st.write("---")

# Initialize session state BEFORE any widget below reads it: the preview /
# export checkboxes call get_chat_history_df(), which reads 'past' and
# 'generated_responses' and would raise KeyError on a fresh session in the
# original ordering (state was only initialized after the checkboxes).
if 'generated_responses' not in st.session_state:
    st.session_state['generated_responses'] = []

if 'execution_times' not in st.session_state:
    st.session_state['execution_times'] = []

if 'past' not in st.session_state:
    st.session_state['past'] = []

# Create a container for the columns
container = st.container()

# Add the columns inside the container
with container:
    col1, col2, col3 = st.columns(3)

    # Checkbox to clear the chat log.
    if col1.checkbox("Clear Chat Log"):
        clear_session_state()

    # Checkbox to preview the chat history as a table.
    if col2.checkbox("Preview the Chat History"):
        chat_history_df = get_chat_history_df()
        st.write(chat_history_df)

    # Checkbox to export the chat history as a CSV download.
    if col3.checkbox("Export Chat History"):
        chat_history_df = get_chat_history_df()
        export_chat_history(chat_history_df)

# Get user input
user_input = get_text()

if user_input:
    # Generate response and record the round trip in session state.
    response, exec_time = chatbot_response(user_input)

    st.session_state.past.append(user_input)
    st.session_state.generated_responses.append(response)
    st.session_state.execution_times.append(exec_time)

    # Display the execution time of the response as a metric
    st.metric("Execution Time", f"{exec_time:.2f} seconds")

if st.session_state['generated_responses']:
    # Render the transcript newest-first; each widget needs a unique key.
    for i in range(len(st.session_state['generated_responses']) - 1, -1, -1):
        message(f"Bot: {st.session_state['generated_responses'][i]}",
                is_user=False,
                avatar_style='bottts-neutral',
                seed=10,
                key=f"response_{i}"
                )
        message(f"You: {st.session_state['past'][i]}",
                is_user=True,
                avatar_style="open-peeps",
                seed=1,
                key=f"user_{i}"
                )

# End of app
if __name__ == "__main__":
    run_app()
chatbotlib/train_chatbot.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ """The chatbot model training app is the "Train the chatbot model page" in the main page"""
4
+
5
+ # the code for the page
6
+ def run_app():
7
+ """
8
+ Application code for the Chatbot Training app
9
+ """
10
+ ############################
11
+ # :: IMPORTS AND CONSTANTS #
12
+ ############################
13
+
14
+ # machine learning modules
15
+ import numpy as np
16
+ import random
17
+
18
+ # Deep Learning modules
19
+ from keras.models import Sequential
20
+ from keras.layers import Dense, Dropout
21
+ from keras.optimizers import SGD
22
+
23
+ import streamlit as st
24
+
25
+ # suppress warnings
26
+ import warnings
27
+ warnings.filterwarnings('ignore')
28
+
29
+ # for file processing
30
+ import json
31
+ import pickle
32
+
33
+ # natural language processiong modules
34
+ import nltk
35
+ from nltk.stem import WordNetLemmatizer
36
+ lemmatizer = WordNetLemmatizer()
37
+
38
+ # Load NLTK dependencies
39
+ nltk.download('punkt')
40
+ nltk.download('wordnet')
41
+
42
+ ######################################
43
+ # :: HELPER FUNCTIONS FOR PROCESSING #
44
+ ######################################
45
+
46
def read_json_file(uploaded_file):
    """
    Read an uploaded JSON file and parse its contents.

    Args:
        uploaded_file: File-like object whose .read() yields UTF-8 bytes.

    Returns:
        dict: The contents of the JSON file.
    """
    raw = uploaded_file.read()
    return json.loads(raw.decode("utf-8"))
58
+
59
def preprocess_data(data):
    """
    Tokenize the intents JSON into vocabulary, classes and documents.

    Args:
        data (dict): The JSON data with an 'intents' list.

    Returns:
        tuple: (words, classes, documents) — sorted unique lemmatized
        vocabulary, sorted unique tags, and (tokens, tag) pairs.
    """
    words = []
    classes = []
    documents = []
    ignore_words = ['?', '!']

    lemmatizer = WordNetLemmatizer()
    for command in data['intents']:
        for pattern in command['patterns']:
            # Tokenize each pattern and record it with its tag.
            tokens = nltk.word_tokenize(pattern)
            words.extend(tokens)
            documents.append((tokens, command['tag']))
            # Register the tag the first time we see it (kept inside the
            # pattern loop so tags with no patterns are never added).
            if command['tag'] not in classes:
                classes.append(command['tag'])

    # Lemmatize, lowercase, drop punctuation markers, and dedupe.
    words = sorted({lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words})
    classes = sorted(set(classes))

    return words, classes, documents
94
+
95
def load_pickle_data():
    """
    Load the preprocessed vocabulary, classes and documents pickles.

    Returns:
        tuple: The loaded pickle data (words, classes, documents).
    """
    loaded = []
    for filename in ('words.pkl', 'classes.pkl', 'documents.pkl'):
        with open(filename, 'rb') as f:
            loaded.append(pickle.load(f))
    return tuple(loaded)
109
+
110
def create_training_data(words, classes, documents):
    """
    Create the training data for the chatbot model.

    Args:
        words (list): List of words in the vocabulary.
        classes (list): List of classes/intents.
        documents (list): List of (pattern_tokens, intent) pairs.

    Returns:
        tuple: Tuple containing the training data as NumPy arrays (train_x, train_y).
    """
    training = []
    output_empty = np.zeros(len(classes), dtype=int)

    for pattern_tokens, tag in documents:
        # Lemmatize the pattern so it matches the preprocessed vocabulary.
        pattern_words = {lemmatizer.lemmatize(word.lower()) for word in pattern_tokens}

        bag = np.zeros(len(words), dtype=int)
        for i, w in enumerate(words):
            if w in pattern_words:
                bag[i] = 1

        # One-hot target for the document's intent class.
        output_row = np.copy(output_empty)
        output_row[classes.index(tag)] = 1

        training.append((bag, output_row))

    random.shuffle(training)

    # Do NOT wrap `training` in np.array(): when len(words) != len(classes)
    # the pairs are ragged, and NumPy >= 1.24 raises instead of building an
    # object array (the original relied on that deprecated behavior).
    train_x = np.array([bag for bag, _ in training])
    train_y = np.array([row for _, row in training])

    return train_x, train_y
146
+
147
###############
# :: MAIN APP #
###############

st.markdown("<h1 style='text-align: left;'>Train the chatbot model ⚙️</h1>", unsafe_allow_html=True)
st.subheader(
    """
    Let's train the chatbot model by following the sequence of steps provided below:
    """
)
# Summary of steps
st.markdown(
    """
    **Summary of Steps:**
    - Upload the `commands.json` file for processing. The file should contain the commands and their corresponding tags.
    - Load the preprocessed data from pickle files (optional if you have already processed the data previously).
    - Create the training data by converting the commands into numerical vectors.
    - Build the model by specifying the number of layers, epochs, batch size, and activation function.

    Once the model is built, the training loss and accuracy will be displayed.
    """
)
st.write("---")

if st.checkbox("Upload the commands.json file for processing"):
    st.subheader("JSON File Uploader")
    uploaded_file = st.file_uploader("Upload JSON file", type="json")

    if uploaded_file is not None:
        try:
            data = read_json_file(uploaded_file)

            st.json(data)

            # Preprocess the data
            words, classes, documents = preprocess_data(data)

            # Persist the preprocessed data so later steps can reload it.
            with open('words.pkl', 'wb') as f:
                pickle.dump(words, f)
            with open('classes.pkl', 'wb') as f:
                pickle.dump(classes, f)
            with open('documents.pkl', 'wb') as f:
                pickle.dump(documents, f)

            # Display the processed data
            st.write("Preprocessing Results:")
            st.write(len(documents), "documents")
            st.write(len(classes), "classes", classes)
            st.write(len(words), "unique lemmatized words", words)

        except json.JSONDecodeError:
            st.error("Invalid JSON file.")

if st.checkbox("Load pickle data"):
    # Initialize the progress bar
    progress_bar = st.progress(0)

    with st.spinner("Creating training data ..."):

        words, classes, documents = load_pickle_data()

        # Update the progress bar
        progress_bar.progress(100)

        st.write("Words:")
        st.write(words)

        st.write("Classes:")
        st.write(classes)

        st.write("Documents:")
        st.write(documents)

if st.checkbox("Create training data"):
    # NOTE(review): depends on `words`/`classes`/`documents` from the steps
    # above; if they were skipped the NameError is surfaced via st.error.
    try:
        # Initialize the progress bar
        progress_bar = st.progress(0)

        with st.spinner("Creating training data ..."):
            train_x, train_y = create_training_data(words, classes, documents)

            # Update the progress bar
            progress_bar.progress(100)

            st.success("Training data created")

            st.write(f"Training data (train_x): {len(train_x)} samples")
            st.write(f"Training data (train_y): {len(train_y)} samples")

    except Exception as e:
        st.error("An error occurred during training data creation.")
        st.error(str(e))

if st.checkbox("Build the model"):
    # Hyper-parameters chosen by the user.
    num_layers = st.number_input("Number of layers", min_value=1, max_value=10, value=3)
    epochs = st.number_input("Number of epochs", min_value=1, max_value=1000, value=200)
    batch_size = st.number_input("Batch size", min_value=1, max_value=100, value=5)
    activation_functions = ['relu', 'sigmoid', 'softmax']
    activation_function = st.selectbox("Activation function", options=activation_functions)

    try:
        # Initialize the progress bar
        progress_bar = st.progress(0)

        with st.spinner("Building the model ..."):
            # Create model
            model = Sequential()

            # First layer fixes the input shape; later ones are hidden layers.
            for i in range(num_layers):
                if i == 0:
                    # Input layer
                    model.add(Dense(128, input_shape=(len(train_x[0]),), activation=activation_function))
                else:
                    # Hidden layers
                    model.add(Dense(64, activation=activation_function))
                    model.add(Dropout(0.5))

            # Output layer: one unit per intent class.
            model.add(Dense(len(train_y[0]), activation='softmax'))

            # `lr` and `decay` were removed from Keras optimizers; use
            # `learning_rate` (decay is expressed via LR schedules now).
            sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
            model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

            # Fit the model
            hist = model.fit(np.array(train_x), np.array(train_y), epochs=epochs, batch_size=batch_size, verbose=1)

            # The original passed `hist` as the second positional argument,
            # which Keras interprets as `overwrite` — it does not save the
            # training history. Save the model only.
            model.save('chatbot_model.h5')

            # Update the progress bar
            progress_bar.progress(100)

        st.success("The chatbot model is created")

        # Display training loss and accuracy summary
        st.subheader("Training Summary")
        st.write("Training Loss:", hist.history['loss'][-1])
        st.write("Training Accuracy:", hist.history['accuracy'][-1])

    except Exception as e:
        st.error("An error occurred during model building.")
        st.error(str(e))

# End of app
if __name__ == "__main__":
    run_app()
index.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from streamlit_option_menu import option_menu
from chatbotlib import (train_chatbot, chatbot_demo)

# Configure the Streamlit page: wide layout plus the title/icon shown
# in the browser tab.
st.set_page_config(
    layout="wide",
    page_title="NLP Chatbot Main Page",
    page_icon="💬"
)

# Sidebar navigation menu.
with st.sidebar:

    # icons are located at bootstrap's website: https://icons.getbootstrap.com
    page_selection = option_menu(
        "NLP Chatbot App",
        ["Train the Chatbot Model", "Demo the Chatbot"],
        icons=["gear", "chat-dots"],
        menu_icon="emoji-smile",
        default_index=0,
        orientation="vertical",
        styles={
            "container": {"padding": "5!important", "background-color": "#fafafa"},
            "icon": {"color": "orange", "font-size": "25px"},
            "nav-link": {
                "font-size": "16px",
                "text-align": "left",
                "margin": "0px",
                "--hover-color": "#eee",
            },
            "nav-link-selected": {"background-color": "#0068B5"},
        },
    )

# Dispatch to the sub-app chosen in the option menu.
if page_selection == "Train the Chatbot Model":
    train_chatbot.run_app()
elif page_selection == "Demo the Chatbot":
    chatbot_demo.run_app()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ nltk
2
+ keras
3
+ numpy
4
+ pandas
5
+ streamlit==1.22.0
6
+ streamlit-chat==0.0.2.2
7
+ streamlit-option-menu==0.3.5