w3680 committed on
Commit
6848c8b
ยท
1 Parent(s): ae3c993

updated version

Browse files
chat_logic/chat_stream.py CHANGED
@@ -4,14 +4,16 @@ from rag.vectorization_functions import split_documents, create_embedding_vector
4
  # lead ifixit infos
5
  from rag.ifixit_document_retrieval import load_ifixit_guides
6
  #model
7
- from helper_functions.llm_base_client import llm_base_client_init
8
  from chat_logic.prompts import load_prompts
9
-
 
 
10
 
11
  def chatbot_answer(user_query, memory=None, context="", prompt="default", response_type=None, modelname="llama3-8b-8192", temp=0.3):
12
  """
13
 
14
- Gererate a response from the model based on the user's query and chat history.
15
  Can be used for both the first query and follow-up questions by using different prompts.
16
 
17
  Args:
@@ -34,8 +36,10 @@ def chatbot_answer(user_query, memory=None, context="", prompt="default", respo
34
 
35
  if memory:
36
  for user_msg, bot_msg in memory:
37
- messages.append({"role": "user", "content": user_msg})
38
- messages.append({"role": "assistant", "content": bot_msg})
 
 
39
  messages.append({"role": "user", "content": user_query})
40
 
41
  # calling the LLM with the entire chat history in order to get an answer
@@ -46,8 +50,40 @@ def chatbot_answer(user_query, memory=None, context="", prompt="default", respo
46
  )
47
  return chat_completion
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- def chatbot_interface(history, user_query, response_type=None):
 
 
 
 
 
 
 
 
 
 
 
51
  """
52
 
53
  UI uses this function to handle general chat functionality.
@@ -62,31 +98,101 @@ def chatbot_interface(history, user_query, response_type=None):
62
  list: The model's response added to the chat history.
63
 
64
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  # load guides, create embeddings and return answer for first query
67
- if len(history) == 0:
68
- data = load_ifixit_guides(user_query, debug=True)
69
- chunks = split_documents(data)
70
- global vector_db
71
- vector_db = create_embedding_vector_db(chunks)
72
- context = query_vector_db(user_query, vector_db)
73
- message_content = chatbot_answer(user_query, history, context, prompt="repair_guide", response_type=response_type)
74
- answer = history + [(user_query, message_content.choices[0].message.content)]
75
- return answer
76
-
77
- # answer questions to the guide
78
- else:
79
- context = query_vector_db(user_query, vector_db)
80
- message_content = chatbot_answer(user_query, history, context, prompt="repair_helper", response_type=response_type)
81
- answer = history + [(user_query, message_content.choices[0].message.content)]
82
- return answer
83
-
84
- # Feedback function for thumbs up (chat ends with success message)
85
  def feedback_positive(history):
86
- history.append((None, "๐ŸŽ‰ Great! We're happy to hear that your repair was successful! If you need help in the future, feel free to ask."))
87
- return history
 
 
 
 
 
 
 
88
 
89
- # Feedback function for thumbs down (chat continues)
90
  def feedback_negative(history):
91
- history.append((None, "I'm sorry to hear that. Could you describe the issue further? Maybe we can find another solution."))
92
- return history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  # load ifixit infos
5
  from rag.ifixit_document_retrieval import load_ifixit_guides
6
  #model
7
+ from helper_functions.llm_client_initialization import llm_base_client_init
8
  from chat_logic.prompts import load_prompts
9
+ from chat_logic.diagnosis import information_extractor
10
+ import time
11
+ import gradio as gr
12
 
13
  def chatbot_answer(user_query, memory=None, context="", prompt="default", response_type=None, modelname="llama3-8b-8192", temp=0.3):
14
  """
15
 
16
+ Generate a response from the model based on the user's query and chat history.
17
  Can be used for both the first query and follow-up questions by using different prompts.
18
 
19
  Args:
 
36
 
37
  if memory:
38
  for user_msg, bot_msg in memory:
39
+ if user_msg and user_msg != None:
40
+ messages.append({"role": "user", "content": user_msg})
41
+ if bot_msg:
42
+ messages.append({"role": "assistant", "content": bot_msg})
43
  messages.append({"role": "user", "content": user_query})
44
 
45
  # calling the LLM with the entire chat history in order to get an answer
 
50
  )
51
  return chat_completion
52
 
53
def chatbot_answer_init(user_query, vector_db, history, response_type, prompt, k=5, modelname="llama3-8b-8192", temp=0.3, history_length=5):
    """
    Generate an answer for the query, optionally grounded in RAG context.

    Args:
        user_query (str): The user's query.
        vector_db: The vector database to query for context, or None/empty to skip retrieval.
        history (list): The chat history as [user, assistant] pairs.
        response_type (str): The style of language the answer should use.
        prompt (str): The name of the prompt template to load.
        k (int): Number of context chunks to retrieve from the vector database.
        modelname (str): Name of the LLM to call.
        temp (float): Sampling temperature for the LLM.
        history_length (int): How many of the most recent history turns to send to the model.

    Returns:
        list: The chat history with the new [query, answer] pair appended.
    """
    # Only retrieve context when an index exists; the diagnosis turns run
    # before any guides have been embedded.
    if vector_db:
        context = query_vector_db(user_query, vector_db, k)
    else:
        context = ""

    # Truncate the history so long conversations do not blow the model's
    # context window.
    recent_history = history[-history_length:]
    message_content = chatbot_answer(user_query, recent_history, context, prompt, response_type, modelname, temp)
    answer = history + [[user_query, message_content.choices[0].message.content]]
    return answer
74
 
75
def chatbot_rag_init(user_query):
    """
    Build the RAG vector database for a fresh repair query.

    Fetches matching iFixit guides, splits them into chunks, and embeds the
    chunks into a new vector store.

    Args:
        user_query (str): The query used to search for relevant guides.

    Returns:
        The vector database built from the retrieved guide chunks.
    """
    guides = load_ifixit_guides(user_query, debug=True)
    guide_chunks = split_documents(guides)
    return create_embedding_vector_db(guide_chunks)
85
+
86
+ def chatbot_interface(history, user_query, response_type, conversation_state):
87
  """
88
 
89
  UI uses this function to handle general chat functionality.
 
98
  list: The model's response added to the chat history.
99
 
100
  """
101
+ #Diagnose issue
102
+ if conversation_state == 'interactive_diagnosis':
103
+ answer = chatbot_answer_init(user_query, None, history, response_type, prompt="diagnose_issue")
104
+ extracted_info = information_extractor(answer)
105
+
106
+ if any(value == '' or (value is not None and 'none' in value.lower()) or
107
+ (value is not None and 'not specified' in value.lower()) or
108
+ (value is not None and 'unknown' in value.lower())
109
+ for value in extracted_info.values()
110
+ ):
111
+ conversation_state = "interactive_diagnosis"
112
+ else:
113
+ global vector_db
114
+ vector_db = [] # reset vector database to avoid memory issues
115
+ vector_db = chatbot_rag_init(answer[-1][1])
116
 
117
+ repair_question = f"List repair steps for {extracted_info['issue']} of {extracted_info['brand']} {extracted_info['model']}."
118
+
119
+ answer = chatbot_answer_init(repair_question,
120
+ vector_db,
121
+ history,
122
+ response_type,
123
+ prompt="repair_guide",
124
+ k=10,
125
+ modelname="llama-3.1-8b-instant",
126
+ temp=0.3)
127
+ conversation_state = "repair_mode"
128
+
129
+ elif conversation_state == 'repair_mode':
130
+ answer = chatbot_answer_init(user_query,
131
+ vector_db,
132
+ history,
133
+ response_type,
134
+ prompt="repair_helper",
135
+ k=5)
136
  # load guides, create embeddings and return answer for first query
137
+ print("Answer before returning to Handle User INput:", answer)
138
+ return answer, conversation_state
139
+
140
def handle_user_input(user_input_text, history, conversation_state, response_type):
    """
    Route a submitted message to the right handler based on conversation state.

    Yields (chat_history, textbox_value, conversation_state) tuples so the UI
    can update the chat, clear the input box, and persist the state.
    """
    print(conversation_state)
    print(type(conversation_state))
    print("History before calling Chatbot Interface:", history)

    # The support-ticket confirmation flow has its own generator handler.
    if conversation_state == "awaiting_support_confirmation":
        yield from support_ticket_needed(user_input_text, history, conversation_state)
        return

    # Normal chat turn: diagnosis or repair help.
    answer, conversation_state = chatbot_interface(history, user_input_text, response_type, conversation_state)
    print("Answer before returning to Interface Design:", answer)
    print("Conversation state before returning to Interface Design:", conversation_state)
    yield answer, "", conversation_state
152
+
153
+ # Feedback function for thumbs up (chat ends with success message & restarts)
 
154
def feedback_positive(history):
    """
    Thumbs-up handler: post a success message, pause, then reset the chat.

    Yields twice: first the history including the farewell message (so the
    user sees it), then an empty chat with the state reset to diagnosis mode.
    """
    success_message = "<br><br><br>🎉 Great! We're happy to hear that your repair was successful! If you need help in the future, feel free to ask. I will automatically restart the chat."
    history.append([None, success_message])
    print("Chat history:", history)
    # NOTE(review): "repair_helper" is not a state chatbot_interface handles
    # ("interactive_diagnosis"/"repair_mode") -- confirm the intended value.
    conversation_state = "repair_helper"
    yield history, gr.update(value=""), conversation_state  # shows message
    time.sleep(5)  # short break so the message remains visible
    history.clear()
    conversation_state = "interactive_diagnosis"
    print("History after clearing:", history)
    yield [], gr.update(value=""), conversation_state  # reset chat
164
 
165
+ # Feedback function for thumbs down
166
def feedback_negative(history):
    """
    Thumbs-down handler: offer to open a support ticket.

    Appends the offer to the chat history and switches the conversation
    state to "awaiting_support_confirmation" so the next user message is
    routed to the support-ticket flow.
    """
    offer = "<br><br><br>I'm sorry to hear that. Do you want me to create a support ticket for you so that you can seek professional help?"
    history.append((None, offer))
    print("Chat history:", history)
    yield history, "awaiting_support_confirmation"
171
+
172
+ # Support ticket creation
173
def support_ticket_needed(message, history, conversation_state):
    """
    Handle the yes/no reply after offering to create a support ticket.

    On "yes": ask the LLM to summarize the conversation into a ticket.
    On "no": acknowledge, wait briefly, then reset the chat for a new repair.
    Otherwise: re-prompt for a yes/no answer.

    Yields (history, textbox_value, conversation_state) tuples for the UI.
    """
    normalized = message.strip().lower()
    history.append((message, None))

    if conversation_state == "awaiting_support_confirmation":
        if "yes" in normalized:
            # NOTE(review): relies on the module-global vector_db; if no
            # repair index was ever built this raises NameError -- confirm.
            ticket_text = chatbot_answer_init("Please summarize this history into a support ticket.",
                                              vector_db,
                                              history,
                                              response_type="Technical",
                                              prompt="support_ticket",
                                              history_length=len(history)
                                              )
            history.append((None, f"🛠️ Your support ticket has been created:\n\n{ticket_text[-1][1]}"))
            conversation_state = "repair_helper"
            yield history, "", conversation_state
        elif "no" in normalized:
            history.append((None, "👍 Ok, I would be happy to help with the next repair problem."))
            yield history, "", conversation_state
            time.sleep(5)  # let the acknowledgement stay visible briefly
            history.clear()
            conversation_state = "interactive_diagnosis"
            yield history, "", conversation_state
        else:
            history.append((None, "❓ Please answer with yes or no."))
            yield history, "", conversation_state
198
+
chat_logic/diagnosis.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #%%
2
+ # processing functions
3
+ # load ifixit infos
4
+
5
+ from langchain.chat_models import init_chat_model
6
+ from langchain.output_parsers import StructuredOutputParser, ResponseSchema
7
+ from langchain.prompts import ChatPromptTemplate
8
+ # import langchain groq package
9
+ from helper_functions.llm_client_initialization import llm_langchain_client_init
10
+ import re
11
+
12
+
13
def information_extractor(conversation):
    """
    Extract the device, brand, model and issue from the conversation using a language model.

    Args:
        conversation (list): The diagnosis chat as [user, assistant] pairs;
            only the last assistant message is parsed.

    Returns:
        dict: Parsed fields with keys 'device', 'brand', 'model' and 'issue'.
    """
    # Initializing the Groq client (via LangChain)
    llm = llm_langchain_client_init()

    # Defining the information we want to extract from the response --
    # one ResponseSchema per field.
    schemas = [
        ResponseSchema(name="device", description="Device or appliance mentioned, if any"),
        ResponseSchema(name="brand", description="Brand of device or appliance mentioned, if any"),
        ResponseSchema(name="model", description="Model of device or appliance mentioned, if any"),
        ResponseSchema(name="issue", description="Main issue or concern with device or appliance, if any. Leave empty if none.")
    ]

    # Initialization of the parser
    parser = StructuredOutputParser.from_response_schemas(schemas)

    # Defining the chat prompt template
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Extract the following info from the message of the user."),
        ("user", "{text}\n\n{format_instructions}")
    ])

    # Hand-written format instructions: the parser can generate these
    # automatically, but it cannot tell the model to omit // comments in the
    # output, which breaks JSON parsing downstream.
    parsing_instructions = '''
    The output should be a markdown code snippet formatted in the following schema,
    including the leading and trailing "```json" and "```", without any additional text or comments:

    ```json
    {
        "device": string  // Device or appliance mentioned, if any
        "brand": string  // Brand of device or appliance mentioned, if any
        "model": string  // Model of device or appliance mentioned, if any
        "issue": string  // Main issue or concern with device or appliance, if any. Leave empty if none.
    }
    ```
    '''

    # Formatting the prompt with the last message in the conversation and the
    # instructions to parse the information from that message.
    formatted_prompt = prompt.format_prompt(
        text=conversation[-1][1],  # The last message in the conversation
        format_instructions=parsing_instructions
    )

    print('Text parsed by LLM: ', conversation[-1][1])

    # Calling the LLM and parsing the output. Fix: use .invoke() instead of
    # the deprecated BaseChatModel.__call__ interface.
    output = llm.invoke(formatted_prompt.to_messages())
    print("Output Content: " + output.content)

    # Strip any trailing // comments the model added despite the instructions.
    cleaned_content = re.sub(r'//.*$', '', output.content, flags=re.MULTILINE)

    print("Cleaned Content: " + cleaned_content)

    parsed_content = parser.parse(cleaned_content)
    print("Dictionary: ", parsed_content)
    return parsed_content
chat_logic/prompts.py CHANGED
@@ -1,4 +1,4 @@
1
- def load_prompts(prompt, context="", response_type=None):
2
  """
3
  Load the prompts from a file or define them in the code.
4
 
@@ -16,16 +16,16 @@ def load_prompts(prompt, context="", response_type=None):
16
  if response_type == "Simple Language":
17
  response_type = "Use plain language and explain so that a 5th grader would understand."
18
 
19
- if response_type == "Technical":
20
  response_type = "Use technical jargon and provide detailed explanations."
21
 
22
- if response_type == "Homer Simpson Language":
23
  response_type = "Use simple language and explain it like Homer Simpson would."
24
 
25
- if response_type == "Sarcasm":
26
  response_type = "Use sarcastic language and tone."
27
 
28
- if response_type is None:
29
  response_type = ""
30
 
31
  # choose prompt and append response_type
@@ -33,15 +33,36 @@ def load_prompts(prompt, context="", response_type=None):
33
  prompt = ("""You are a helpful assistant that helps users with the repair of their devices.
34
  Ask them if they need help with a repair.
35
  If they do, ask them to provide the device name and model. """ + response_type)
 
 
 
 
 
 
 
 
 
 
36
 
37
- if prompt == "repair_guide":
38
  prompt = (f"List repair steps for the Problem. Use the following context:\n{context}. " + response_type)
39
 
40
- if prompt == "repair_helper":
41
  prompt = (f"Answer the users question about the guide. Use the following context:\n{context}. " + response_type)
 
 
 
 
 
 
 
 
 
 
 
42
 
43
- return prompt
44
 
45
-
46
-
47
-
 
1
+ def load_prompts(prompt="default", context="", response_type=None):
2
  """
3
  Load the prompts from a file or define them in the code.
4
 
 
16
  if response_type == "Simple Language":
17
  response_type = "Use plain language and explain so that a 5th grader would understand."
18
 
19
+ elif response_type == "Technical":
20
  response_type = "Use technical jargon and provide detailed explanations."
21
 
22
+ elif response_type == "Homer Simpson Language":
23
  response_type = "Use simple language and explain it like Homer Simpson would."
24
 
25
+ elif response_type == "Sarcasm":
26
  response_type = "Use sarcastic language and tone."
27
 
28
+ elif response_type is None:
29
  response_type = ""
30
 
31
  # choose prompt and append response_type
 
33
  prompt = ("""You are a helpful assistant that helps users with the repair of their devices.
34
  Ask them if they need help with a repair.
35
  If they do, ask them to provide the device name and model. """ + response_type)
36
+
37
+ elif prompt == "diagnose_issue":
38
+ prompt = ("""
39
+ You are a helpful assistant.
40
+ Your job is to determine if an appliance or device, the brand of the appliance or device,
41
+ the model of the appliance or device and the user's issue with the applicance or device
42
+ were mentioned in the user's message.
43
+ If yes, extract the appliance or device, its model and its issue and confirm it back to the user and stop asking for information.
44
+ If not, continue to ask the user to provide the missing information until they provide it.
45
+ Do not provide troubleshooting steps or solutions.""" + response_type)
46
 
47
+ elif prompt == "repair_guide":
48
  prompt = (f"List repair steps for the Problem. Use the following context:\n{context}. " + response_type)
49
 
50
+ elif prompt == "repair_helper":
51
  prompt = (f"Answer the users question about the guide. Use the following context:\n{context}. " + response_type)
52
+
53
+ # create support ticket
54
+ elif prompt == "support_ticket":
55
+ prompt = ("""
56
+ You are a technical support assistant. Based on the user's input, generate a structured support ticket with the following fields:
57
+ 1. Device Type
58
+ 2. Brand and Model
59
+ 3. Serial Number (if available)
60
+ 4. Problem Description
61
+ 5. Troubleshooting Steps Already Taken
62
+ 6. Additional Notes (if available)
63
 
64
+ Ensure the ticket is clear and concise, suitable for submission to a professional repair service.
65
 
66
+ """ + response_type)
67
+
68
+ return prompt
helper_functions/llm_base_client.py DELETED
@@ -1,12 +0,0 @@
1
- #%%
2
- import os
3
- from dotenv import load_dotenv
4
- from groq import Groq
5
-
6
- def llm_base_client_init():
7
- load_dotenv()
8
- groq_key = os.getenv('GROQ_API_KEY')
9
- client = Groq(api_key=groq_key)
10
-
11
- return client
12
-
 
 
 
 
 
 
 
 
 
 
 
 
 
helper_functions/llm_client_initialization.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #%%
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from groq import Groq
5
+ from langchain_groq import ChatGroq
6
+
7
def llm_base_client_init():
    """
    Create a plain Groq API client using the key from the environment.

    Returns:
        Groq: An initialized Groq client.
    """
    load_dotenv()  # pull GROQ_API_KEY from a local .env file if present
    api_key = os.getenv('GROQ_API_KEY')
    return Groq(api_key=api_key)
13
+
14
def llm_langchain_client_init(modelname="llama-3.1-8b-instant", temp=0.2):
    '''
    Initializes the LLM client using the langchain_groq package.

    Parameters:
        modelname (str): The name of the model to use. Other models: "meta-llama/llama-4-scout-17b-16e-instruct"
        temp (float): Sampling temperature for the model.
    '''
    load_dotenv()  # make GROQ_API_KEY from .env visible
    api_key = os.getenv('GROQ_API_KEY')

    return ChatGroq(
        model=modelname,
        temperature=temp,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        api_key=api_key
    )
helper_functions/load_embed_model.py ADDED
@@ -0,0 +1 @@
 
 
1
# Module-level slot for the shared sentence-transformer embedding model.
# main() assigns a HuggingFaceEmbeddings instance here at startup; it stays
# None until then, so importers must not use it before initialization.
embedding_model = None
main.py CHANGED
@@ -5,9 +5,13 @@ from helper_functions.checkpoint import checkpoint
5
 
6
  # get interface design
7
  from ui.interface_design import interface_init
8
-
 
9
 
10
  def main():
 
 
 
11
  interface_init()
12
 
13
 
 
5
 
6
  # get interface design
7
  from ui.interface_design import interface_init
8
+ from langchain_huggingface import HuggingFaceEmbeddings
9
+ import helper_functions.load_embed_model
10
 
11
def main():
    """Entry point: load the shared embedding model once, then start the UI."""
    # Instantiate the sentence-transformer encoder and publish it through the
    # shared module slot so the RAG helpers reuse a single instance.
    encoder = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2'
    )
    helper_functions.load_embed_model.embedding_model = encoder
    interface_init()
16
 
17
 
rag/ifixit_document_retrieval.py CHANGED
@@ -1,6 +1,6 @@
1
 
2
  from langchain_community.document_loaders import IFixitLoader
3
- from helper_functions.llm_base_client import llm_base_client_init
4
  #function for rewriting info into searchphrase
5
  def write_searchphrase(search_info: str, debug: bool = False):
6
  """
 
1
 
2
  from langchain_community.document_loaders import IFixitLoader
3
+ from helper_functions.llm_client_initialization import llm_base_client_init
4
  #function for rewriting info into searchphrase
5
  def write_searchphrase(search_info: str, debug: bool = False):
6
  """
rag/vectorization_functions.py CHANGED
@@ -2,11 +2,10 @@
2
  #%%
3
  # General
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
5
- from langchain_huggingface import HuggingFaceEmbeddings
6
  from langchain_community.vectorstores import FAISS
7
- from helper_functions.llm_base_client import llm_base_client_init
8
 
9
- def split_documents(documents, chunk_size=800, chunk_overlap=80): # check chunk size and overlap for our purpose
10
  """
11
  This function splits documents into chunks of given size and overlap.
12
 
@@ -20,7 +19,8 @@ def split_documents(documents, chunk_size=800, chunk_overlap=80): # check chunk
20
  """
21
  text_splitter = RecursiveCharacterTextSplitter(
22
  chunk_size=chunk_size,
23
- chunk_overlap=chunk_overlap
 
24
  )
25
  chunks = text_splitter.split_documents(documents=documents)
26
  return chunks
@@ -37,19 +37,16 @@ def create_embedding_vector_db(chunks):
37
  Returns:
38
  vector_db: The vector database containing the embedded chunks.
39
  """
40
- # instantiate embedding model
41
- embedding = HuggingFaceEmbeddings(
42
- model_name='sentence-transformers/all-MiniLM-L6-v2' # EMBEDDING MODEL! converts text to vector ( stick to it)
43
- )
44
  # create the vector store
45
- vector_db = FAISS.from_documents( # stores embeddings # from_documents includes metadata
 
46
  documents=chunks,
47
- embedding=embedding
48
  )
49
- return vector_db # optimize
50
 
51
  # Function to query the vector database and interact with Groq
52
- def query_vector_db(query, vector_db):
53
  """
54
  This function queries the vector database with the user query and retrieves relevant documents
55
 
@@ -62,7 +59,7 @@ def query_vector_db(query, vector_db):
62
 
63
  """
64
  # Retrieve relevant documents
65
- docs = vector_db.similarity_search(query, k=3) # neigbors k are the chunks # similarity_search: FAISS function
66
  context = "\n".join([doc.page_content for doc in docs])
67
 
68
  return context
 
2
  #%%
3
  # General
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
 
5
  from langchain_community.vectorstores import FAISS
6
+ import helper_functions.load_embed_model
7
 
8
def split_documents(documents, chunk_size=900, chunk_overlap=90):
    """
    Split documents into overlapping chunks for embedding.

    Args:
        documents: Sequence of LangChain documents to split.
        chunk_size (int): Maximum characters per chunk.
        chunk_overlap (int): Characters shared between consecutive chunks.

    Returns:
        list: The resulting document chunks.
    """
    # Separators are tried in order, so splits prefer guide/step boundaries
    # before falling back to paragraphs, sentences and single characters.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        separators=["###", "Step ", "\n\n", "\n", ".", " ", ""]
    )
    return splitter.split_documents(documents=documents)
 
37
  Returns:
38
  vector_db: The vector database containing the embedded chunks.
39
  """
 
 
 
 
40
  # create the vector store
41
+ embed_model = helper_functions.load_embed_model.embedding_model # load embedding model
42
+ vector_database = FAISS.from_documents( # stores embeddings # from_documents includes metadata
43
  documents=chunks,
44
+ embedding=embed_model
45
  )
46
+ return vector_database # optimize
47
 
48
  # Function to query the vector database and interact with Groq
49
def query_vector_db(query, vector_db, k=3):
    """
    Query the vector database and return relevant context for the query.

    Args:
        query (str): The user's query.
        vector_db: The FAISS vector store to search.
        k (int): Number of nearest-neighbour chunks to retrieve. Defaults to
            3 -- the previously hard-coded value -- so call sites written
            against the old two-argument signature keep working.

    Returns:
        str: The page contents of the k most similar chunks, newline-joined.
    """
    # similarity_search returns the k nearest chunks by embedding distance
    docs = vector_db.similarity_search(query, k)
    context = "\n".join([doc.page_content for doc in docs])

    return context
ui/custom_css.py CHANGED
@@ -1,5 +1,3 @@
1
- # Load a custom CSS for Gradio interface
2
-
3
  def custom_css():
4
  """
5
  Custom CSS for Gradio interface to style buttons, chat container, and background.
@@ -9,6 +7,53 @@ def custom_css():
9
  """
10
  custom_css = """
11
  <style>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  .submit-button {
13
  background-color: #E69A8D !important; /* Coral Red */
14
  color: white !important;
@@ -20,21 +65,31 @@ def custom_css():
20
  cursor: pointer;
21
  border-radius: 5px;
22
  }
 
23
  .submit-button:hover {
24
  background-color: #D17F73 !important;
25
  }
26
- .chat-container {
27
- max-height: 500px;
28
- overflow-y: auto;
29
- }
30
  .feedback-buttons {
31
  display: flex;
32
  gap: 10px;
33
  margin-top: 5px;
34
  }
35
- .gradio-container {
36
- background-color: #74BA9C !important;
 
 
 
 
 
 
 
 
 
 
 
37
  }
38
  </style>
39
  """
40
- return custom_css
 
 
 
1
  def custom_css():
2
  """
3
  Custom CSS for Gradio interface to style buttons, chat container, and background.
 
7
  """
8
  custom_css = """
9
  <style>
10
+ /* Overall container for Gradio interface */
11
+ .gradio-container {
12
+ background-color: #74BA9C !important;
13
+ display: flex !important;
14
+ flex-direction: row; /* Make the left and right sections side by side */
15
+ min-height: 100vh;
16
+ padding: 20px;
17
+ box-sizing: border-box;
18
+ width: 100%;
19
+ overflow: visible !important;
20
+ }
21
+
22
+ /* Left container for logo, input, and buttons */
23
+ .gradio-left-container {
24
+ display: flex;
25
+ flex-direction: column;
26
+ width: 40%; /* Adjust width as needed */
27
+ padding-right: 20px; /* Space between left and right sections */
28
+ }
29
+
30
+ /* Right container for the chat output */
31
+ .gradio-right-container {
32
+ display: flex;
33
+ flex-direction: column;
34
+ flex: 1;
35
+ margin-left: 20px; /* Space between left and right sections */
36
+ }
37
+
38
+ /* Style the logo */
39
+ #logo img {
40
+ width: auto;
41
+ height: 190px; /* Adjust the height of the logo */
42
+ max-width: 100%;
43
+ margin-bottom: 20px; /* Add margin for spacing between logo and other elements */
44
+ }
45
+
46
+ #logo {
47
+ background-color: #ffffff !important;
48
+ }
49
+
50
+ /* Make the input text box expand to fill available space */
51
+ .input-textbox {
52
+ flex-grow: 1;
53
+ height: 100px;
54
+ }
55
+
56
+ /* Submit button style */
57
  .submit-button {
58
  background-color: #E69A8D !important; /* Coral Red */
59
  color: white !important;
 
65
  cursor: pointer;
66
  border-radius: 5px;
67
  }
68
+
69
  .submit-button:hover {
70
  background-color: #D17F73 !important;
71
  }
72
+
73
+ /* Feedback buttons layout */
 
 
74
  .feedback-buttons {
75
  display: flex;
76
  gap: 10px;
77
  margin-top: 5px;
78
  }
79
+
80
+ /* Ensure the full page is responsive */
81
+ body {
82
+ margin: 0;
83
+ padding: 0;
84
+ height: auto;
85
+ overflow-y: auto;
86
+ }
87
+
88
+ .gradio-row {
89
+ display: flex;
90
+ justify-content: flex-start;
91
+ align-items: flex-start;
92
  }
93
  </style>
94
  """
95
+ return custom_css
ui/interface_design.py CHANGED
@@ -1,48 +1,79 @@
1
-
2
  #%%
3
-
4
  import gradio as gr
5
- from chat_logic.chat_stream import chatbot_interface, feedback_positive, feedback_negative
6
  from ui.custom_css import custom_css
7
 
8
  def interface_init():
9
  """
10
  Initialize the Gradio interface for the Repair Assistant chatbot.
11
-
12
  """
13
-
14
  logo_path = "./images/logo.png"
15
 
16
  # Gradio UI
17
  with gr.Blocks() as app:
18
- gr.Image(logo_path, elem_id="logo", show_label=False)
19
  gr.HTML(custom_css()) # Insert custom CSS
20
- gr.Markdown("### Repair Assistant - Fix smarter with AI")
21
- gr.Markdown("State your repair topic, select your response style and start chatting.")
22
 
23
- # Chat interface & state
24
- chat_history = gr.State([])
25
- chatbot = gr.Chatbot()
26
- user_input = gr.Textbox(placeholder="What would you like to repair? Please name make, model and problem.")
27
- response_type = gr.Radio(["Simple Language", "Technical", "Homer Simpson Language", "Sarcasm"], label="Answer Style")
28
- submit_btn = gr.Button("Submit", elem_classes="submit-button")
29
 
30
- submit_btn.click(fn=chatbot_interface, inputs=[chatbot, user_input, response_type], outputs=chatbot)
31
- user_input.submit(chatbot_interface, [chatbot, user_input, response_type], chatbot)
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
- # "Did the repair work?" label
34
- gr.Markdown("**Did the repair work?**")
35
 
36
- # Feedback buttons (not functional yet)
37
- with gr.Row(elem_classes="feedback-buttons"):
38
- thumbs_up = gr.Button("๐Ÿ‘ Yes")
39
- thumbs_down = gr.Button("๐Ÿ‘Ž No")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
  # Connect thumbs up to success message (stops chat)
42
- thumbs_up.click(fn=feedback_positive, inputs=[chat_history], outputs=chatbot)
 
 
 
 
43
 
44
- # Connect thumbs down to continue troubleshooting
45
- thumbs_down.click(fn=feedback_negative, inputs=[chat_history], outputs=chatbot)
46
-
 
 
 
 
47
  app.queue().launch()
48
-
 
 
1
  #%%
 
2
  import gradio as gr
3
+ from chat_logic.chat_stream import chatbot_interface, feedback_positive, feedback_negative, handle_user_input
4
  from ui.custom_css import custom_css
5
 
6
def interface_init():
    """
    Build and launch the Gradio UI for the Repair Assistant chatbot.

    Layout: a left column (logo, answer-style selector, repair feedback
    buttons) and a right column (chat history, free-text input, submit
    button). All text input is routed through handle_user_input, which
    threads the conversation_state value between turns.
    """
    logo_path = "./images/logo.png"

    # Gradio UI
    with gr.Blocks() as app:
        gr.HTML(custom_css())  # Insert custom CSS

        with gr.Row():
            # Left column: logo, answer style and feedback controls.
            with gr.Column(scale=1, elem_id="gradio-left-container"):
                gr.Image(logo_path, elem_id="logo", show_label=False, show_fullscreen_button=False)

                response_type = gr.Radio(
                    ["Simple Language", "Technical", "Homer Simpson Language", "Sarcasm"],
                    label="Answer Style"
                )

                # Feedback section (thumbs up / thumbs down)
                gr.Markdown("🛠️ **Did the repair work?**")
                with gr.Row(elem_classes="feedback-buttons"):
                    thumbs_up = gr.Button("👍 Yes")
                    thumbs_down = gr.Button("👎 No")

            # Right column: conversation display and text input.
            with gr.Column(scale=2, elem_id="gradio-right-container"):
                # Tracks which phase the conversation is in (diagnosis ->
                # repair -> optional support-ticket confirmation).
                conversation_state = gr.State("interactive_diagnosis")

                chatbot = gr.Chatbot(elem_id="chat-container")

                user_input = gr.Textbox(
                    label="Pick an answer style and let the Repair Assistant help you!",
                    placeholder="Your input here",
                    elem_classes="input-textbox"
                )
                submit_btn = gr.Button("Submit", elem_classes="submit-button")

                # Button click and textbox Enter share the same wiring.
                chat_inputs = [user_input, chatbot, conversation_state, response_type]
                chat_outputs = [chatbot, user_input, conversation_state]

                submit_btn.click(
                    fn=handle_user_input,
                    inputs=chat_inputs,
                    outputs=chat_outputs
                )

                user_input.submit(
                    fn=handle_user_input,
                    inputs=chat_inputs,
                    outputs=chat_outputs
                )

        # Thumbs up ends the flow with a success message and resets the chat.
        thumbs_up.click(
            fn=feedback_positive,
            inputs=[chatbot],
            outputs=[chatbot, user_input, conversation_state]
        )

        # Thumbs down offers to open a support ticket.
        thumbs_down.click(
            fn=feedback_negative,
            inputs=[chatbot],
            outputs=[chatbot, conversation_state]
        )

    app.queue().launch()