w3680 committed on
Commit
ae3c993
·
1 Parent(s): cce0c89

Version 2

Browse files
chat_logic/chat_stream.py CHANGED
@@ -5,58 +5,30 @@ from rag.vectorization_functions import split_documents, create_embedding_vector
5
  from rag.ifixit_document_retrieval import load_ifixit_guides
6
  #model
7
  from helper_functions.llm_base_client import llm_base_client_init
 
8
 
9
 
10
- def chatbot_interface(history, user_query):
11
  """
12
 
13
- LLM Model is defined here.
14
- Chat history use and chat with user coded here.
15
-
16
- """
17
-
18
- if not user_query.strip():
19
- return history + [(user_query, "Hey, I'd love to help you! What can I do for you?")]
20
-
21
- messages = [{"role": "system",
22
- "content": """You are a helpful assistant
23
- that helps users with the repair of their devices. Ask them if they need help with a repair.
24
- If they do, ask them to provide the device name and model."""}]
25
-
26
- if history:
27
- for user_msg, bot_msg in history:
28
- messages.append({"role": "user", "content": user_msg})
29
- messages.append({"role": "assistant", "content": bot_msg})
30
- messages.append({"role": "user", "content": user_query})
31
- print(messages)
32
-
33
- client = llm_base_client_init()
34
 
35
- chat_completion = client.chat.completions.create(
36
- messages=messages,
37
- model="llama3-8b-8192",
38
- temperature=0.3
39
- )
40
-
41
- return history + [(user_query, chat_completion.choices[0].message.content)]
 
42
 
43
- #%%
44
- # processing functions
45
- from rag.vectorization_functions import split_documents, create_embedding_vector_db, query_vector_db
46
- # load ifixit infos
47
- from rag.ifixit_document_retrieval import load_ifixit_guides
48
- #model
49
- from helper_functions.llm_base_client import llm_base_client_init
50
- from chat_logic.prompts import load_prompts
51
-
52
- def chatbot_answer(user_query, memory=None, context="", prompt="default", modelname="llama3-8b-8192", temp=0.3):
53
- """
54
-
55
- Chat history use and chat with user coded here.
56
 
57
  """
58
  client = llm_base_client_init()
59
- answer_prompt = load_prompts(prompt, context)
60
  messages = [{"role": "system",
61
  "content": answer_prompt}]
62
 
@@ -75,11 +47,19 @@ def chatbot_answer(user_query, memory=None, context="", prompt="default", model
75
  return chat_completion
76
 
77
 
78
- def chatbot_interface(history, user_query):
79
  """
80
 
81
- LLM Model is defined here.
82
- Chat history use and chat with user coded here.
 
 
 
 
 
 
 
 
83
 
84
  """
85
 
@@ -90,31 +70,17 @@ def chatbot_interface(history, user_query):
90
  global vector_db
91
  vector_db = create_embedding_vector_db(chunks)
92
  context = query_vector_db(user_query, vector_db)
93
- message_content = chatbot_answer(user_query, history, context, prompt="repair_guide")
94
  answer = history + [(user_query, message_content.choices[0].message.content)]
95
  return answer
96
 
97
  # answer questions to the guide
98
  else:
99
  context = query_vector_db(user_query, vector_db)
100
- message_content = chatbot_answer(user_query, history, context, prompt="repair_helper")
101
  answer = history + [(user_query, message_content.choices[0].message.content)]
102
  return answer
103
-
104
-
105
-
106
- # Not implemented yet:
107
- def answer_style(history, user_query, response_type):
108
- response = f"Suggested repair steps for: {user_query}\n\n"
109
- if response_type == "Simple Language":
110
- response += "Please provide a clear and easy-to-understand explanation."
111
- elif response_type == "Technical":
112
- response += "Provide a detailed technical breakdown of the repair process."
113
-
114
- history.append((user_query, response)) # Append to chat history
115
- return history
116
-
117
-
118
  # Feedback function for thumbs up (chat ends with success message)
119
  def feedback_positive(history):
120
  history.append((None, "🎉 Great! We're happy to hear that your repair was successful! If you need help in the future, feel free to ask."))
 
5
  from rag.ifixit_document_retrieval import load_ifixit_guides
6
  #model
7
  from helper_functions.llm_base_client import llm_base_client_init
8
+ from chat_logic.prompts import load_prompts
9
 
10
 
11
+ def chatbot_answer(user_query, memory=None, context="", prompt="default", response_type=None, modelname="llama3-8b-8192", temp=0.3):
12
  """
13
 
14
+ Generate a response from the model based on the user's query and chat history.
15
+ Can be used for both the first query and follow-up questions by using different prompts.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
+ Args:
18
+ user_query (str): The user's query.
19
+ memory (list): The chat history.
20
+ context (str): The context to use in the prompt.
21
+ prompt (str): The prompt to load.
22
+ response_type (str): The style of language the answer should use.
23
+ modelname (str): The name of the model to use.
24
+ temp (float): The temperature for the model.
25
 
26
+ Returns:
27
+ str: The model's response.
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  """
30
  client = llm_base_client_init()
31
+ answer_prompt = load_prompts(prompt, context, response_type)
32
  messages = [{"role": "system",
33
  "content": answer_prompt}]
34
 
 
47
  return chat_completion
48
 
49
 
50
+ def chatbot_interface(history, user_query, response_type=None):
51
  """
52
 
53
+ UI uses this function to handle general chat functionality.
54
+ Order of operations is also defined here.
55
+
56
+ Args:
57
+ history (list): The chat history.
58
+ user_query (str): The user's query.
59
+ response_type (str): The style of language the answer should use.
60
+
61
+ Returns:
62
+ list: The model's response added to the chat history.
63
 
64
  """
65
 
 
70
  global vector_db
71
  vector_db = create_embedding_vector_db(chunks)
72
  context = query_vector_db(user_query, vector_db)
73
+ message_content = chatbot_answer(user_query, history, context, prompt="repair_guide", response_type=response_type)
74
  answer = history + [(user_query, message_content.choices[0].message.content)]
75
  return answer
76
 
77
  # answer questions to the guide
78
  else:
79
  context = query_vector_db(user_query, vector_db)
80
+ message_content = chatbot_answer(user_query, history, context, prompt="repair_helper", response_type=response_type)
81
  answer = history + [(user_query, message_content.choices[0].message.content)]
82
  return answer
83
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  # Feedback function for thumbs up (chat ends with success message)
85
  def feedback_positive(history):
86
  history.append((None, "🎉 Great! We're happy to hear that your repair was successful! If you need help in the future, feel free to ask."))
chat_logic/prompts.py CHANGED
@@ -1,20 +1,47 @@
1
- def load_prompts(prompt, context=""):
2
  """
3
  Load the prompts from a file or define them in the code.
4
 
 
 
 
 
 
 
 
 
5
  """
6
- # You can load these prompts
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
 
8
  if prompt == "default":
9
- return """You are a helpful assistant that helps users with the repair of their devices.
10
  Ask them if they need help with a repair.
11
- If they do, ask them to provide the device name and model."""
12
 
13
  if prompt == "repair_guide":
14
- return (f"List repair steps for the Problem. Use the following context:\n{context}")
15
 
16
  if prompt == "repair_helper":
17
- return (f"Answer the users question about the guide. Use the following context:\n{context}")
 
 
 
18
 
19
 
20
 
 
1
+ def load_prompts(prompt, context="", response_type=None):
2
  """
3
  Load the prompts from a file or define them in the code.
4
 
5
+ Args:
6
+ prompt (str): The prompt to load.
7
+ context (str): The context to use in the prompt.
8
+ response_type (str): The style of language the answer should use.
9
+
10
+ Returns:
11
+ str: The loaded prompt.
12
+
13
  """
14
+
15
+ # choose response_type
16
+ if response_type == "Simple Language":
17
+ response_type = "Use plain language and explain so that a 5th grader would understand."
18
+
19
+ if response_type == "Technical":
20
+ response_type = "Use technical jargon and provide detailed explanations."
21
+
22
+ if response_type == "Homer Simpson Language":
23
+ response_type = "Use simple language and explain it like Homer Simpson would."
24
+
25
+ if response_type == "Sarcasm":
26
+ response_type = "Use sarcastic language and tone."
27
+
28
+ if response_type is None:
29
+ response_type = ""
30
 
31
+ # choose prompt and append response_type
32
  if prompt == "default":
33
+ prompt = ("""You are a helpful assistant that helps users with the repair of their devices.
34
  Ask them if they need help with a repair.
35
+ If they do, ask them to provide the device name and model. """ + response_type)
36
 
37
  if prompt == "repair_guide":
38
+ prompt = (f"List repair steps for the Problem. Use the following context:\n{context}. " + response_type)
39
 
40
  if prompt == "repair_helper":
41
+ prompt = (f"Answer the users question about the guide. Use the following context:\n{context}. " + response_type)
42
+
43
+ return prompt
44
+
45
 
46
 
47
 
rag/ifixit_document_retrieval.py CHANGED
@@ -8,6 +8,9 @@ def write_searchphrase(search_info: str, debug: bool = False):
8
 
9
  Args:
10
  search_info (str): The information to be turned into a searchphrase.
 
 
 
11
  """
12
  client = llm_base_client_init()
13
 
 
8
 
9
  Args:
10
  search_info (str): The information to be turned into a searchphrase.
11
+
12
+ Returns:
13
+ str: The rewritten searchphrase.
14
  """
15
  client = llm_base_client_init()
16
 
rag/vectorization_functions.py CHANGED
@@ -8,7 +8,15 @@ from helper_functions.llm_base_client import llm_base_client_init
8
 
9
  def split_documents(documents, chunk_size=800, chunk_overlap=80): # check chunk size and overlap for our purpose
10
  """
11
- this function splits documents into chunks of given size and overlap
 
 
 
 
 
 
 
 
12
  """
13
  text_splitter = RecursiveCharacterTextSplitter(
14
  chunk_size=chunk_size,
@@ -17,12 +25,17 @@ def split_documents(documents, chunk_size=800, chunk_overlap=80): # check chunk
17
  chunks = text_splitter.split_documents(documents=documents)
18
  return chunks
19
 
20
- def create_embedding_vector_db(chunks #, db_name, target_directory=f"../vector_databases"
21
- ):
22
  """
23
- this function uses the open-source embedding model HuggingFaceEmbeddings
24
  to create embeddings and store those in a vector database called FAISS,
25
  which allows for efficient similarity search
 
 
 
 
 
 
26
  """
27
  # instantiate embedding model
28
  embedding = HuggingFaceEmbeddings(
@@ -35,23 +48,22 @@ def create_embedding_vector_db(chunks #, db_name, target_directory=f"../vector_d
35
  )
36
  return vector_db # optimize
37
 
38
- #Function to query the vector database and interact with Groq
39
  def query_vector_db(query, vector_db):
 
 
 
 
 
 
 
 
 
 
 
40
  # Retrieve relevant documents
41
  docs = vector_db.similarity_search(query, k=3) # neighbors k are the chunks # similarity_search: FAISS function
42
  context = "\n".join([doc.page_content for doc in docs])
43
 
44
  return context
45
 
46
- # client = llm_base_client_init()
47
- # # Interact with Groq API
48
- # chat_completion = client.chat.completions.create(
49
- # messages=[
50
- # {"role": "system",
51
- # "content": f"List repair steps for the Problem. Use the following context:\n{context}"},
52
- # {"role": "user", "content": query},
53
- # ],
54
- # model="llama3-8b-8192",
55
- # temperature=0.3 # optional: check best value!
56
- # )
57
- # return chat_completion.choices[0].message.content
 
8
 
9
  def split_documents(documents, chunk_size=800, chunk_overlap=80): # check chunk size and overlap for our purpose
10
  """
11
+ This function splits documents into chunks of given size and overlap.
12
+
13
+ Args:
14
+ documents (list): List of documents to be split.
15
+ chunk_size (int): Size of each chunk.
16
+ chunk_overlap (int): Overlap between chunks.
17
+
18
+ Returns:
19
+ list: List of text chunks.
20
  """
21
  text_splitter = RecursiveCharacterTextSplitter(
22
  chunk_size=chunk_size,
 
25
  chunks = text_splitter.split_documents(documents=documents)
26
  return chunks
27
 
28
+ def create_embedding_vector_db(chunks):
 
29
  """
30
+ Uses the open-source embedding model HuggingFaceEmbeddings
31
  to create embeddings and store those in a vector database called FAISS,
32
  which allows for efficient similarity search
33
+
34
+ Args:
35
+ chunks (list): List of text chunks to be embedded.
36
+
37
+ Returns:
38
+ vector_db: The vector database containing the embedded chunks.
39
  """
40
  # instantiate embedding model
41
  embedding = HuggingFaceEmbeddings(
 
48
  )
49
  return vector_db # optimize
50
 
51
+ # Function to query the vector database and interact with Groq
52
  def query_vector_db(query, vector_db):
53
+ """
54
+ This function queries the vector database with the user query and retrieves relevant documents
55
+
56
+ Args:
57
+ query (str): The user query.
58
+ vector_db: The vector database to query.
59
+
60
+ Returns:
61
+ str: The context retrieved from the vector database.
62
+
63
+ """
64
  # Retrieve relevant documents
65
  docs = vector_db.similarity_search(query, k=3) # neighbors k are the chunks # similarity_search: FAISS function
66
  context = "\n".join([doc.page_content for doc in docs])
67
 
68
  return context
69
 
 
 
 
 
 
 
 
 
 
 
 
 
ui/custom_css.py CHANGED
@@ -1,5 +1,12 @@
 
1
 
2
  def custom_css():
 
 
 
 
 
 
3
  custom_css = """
4
  <style>
5
  .submit-button {
@@ -25,6 +32,9 @@ def custom_css():
25
  gap: 10px;
26
  margin-top: 5px;
27
  }
 
 
 
28
  </style>
29
  """
30
  return custom_css
 
1
+ # Load a custom CSS for Gradio interface
2
 
3
  def custom_css():
4
+ """
5
+ Custom CSS for Gradio interface to style buttons, chat container, and background.
6
+
7
+ Returns:
8
+ str: Custom CSS styles.
9
+ """
10
  custom_css = """
11
  <style>
12
  .submit-button {
 
32
  gap: 10px;
33
  margin-top: 5px;
34
  }
35
+ .gradio-container {
36
+ background-color: #74BA9C !important;
37
+ }
38
  </style>
39
  """
40
  return custom_css
ui/interface_design.py CHANGED
@@ -1,15 +1,15 @@
1
 
2
  #%%
3
 
4
-
5
- #NEW
6
-
7
  import gradio as gr
8
- import os
9
  from chat_logic.chat_stream import chatbot_interface, feedback_positive, feedback_negative
10
  from ui.custom_css import custom_css
11
 
12
  def interface_init():
 
 
 
 
13
 
14
  logo_path = "./images/logo.png"
15
 
@@ -20,43 +20,29 @@ def interface_init():
20
  gr.Markdown("### Repair Assistant - Fix smarter with AI")
21
  gr.Markdown("State your repair topic, select your response style and start chatting.")
22
 
23
- # Input field
24
- #question = gr.Textbox(label="Your Question", placeholder="What would you like to repair? Please name make, model and problem.")
25
-
26
- # Submit button
27
- #submit_button = gr.Button("Submit", elem_classes="submit-button")
28
-
29
  # Chat interface & state
30
  chat_history = gr.State([])
31
  chatbot = gr.Chatbot()
32
  user_input = gr.Textbox(placeholder="What would you like to repair? Please name make, model and problem.")
 
33
  submit_btn = gr.Button("Submit", elem_classes="submit-button")
34
 
35
- submit_btn.click(chatbot_interface, [chatbot, user_input], chatbot)
36
- user_input.submit(chatbot_interface, [chatbot, user_input], chatbot)
37
-
38
- # Response style selection
39
- response_type = gr.Radio(["Simple Language", "Technical"], label="Answer Style")
40
-
41
- # Connect the start button to chat initialization
42
- #submit_button.click(fn=start_chat, inputs=[question,response_type], outputs=[chat_history, chatbot, chatbot])
43
 
44
  # "Did the repair work?" label
45
  gr.Markdown("**Did the repair work?**")
46
 
47
- # Feedback buttons
48
  with gr.Row(elem_classes="feedback-buttons"):
49
  thumbs_up = gr.Button("👍 Yes")
50
  thumbs_down = gr.Button("👎 No")
51
 
52
- # Connect submit button to chatbot function
53
- #submit_button.click(fn=repair_assistant, inputs=[chat_history, question, response_type], outputs=chatbot)
54
-
55
  # Connect thumbs up to success message (stops chat)
56
- #thumbs_up.click(fn=feedback_positive, inputs=[chat_history], outputs=chatbot)
57
 
58
  # Connect thumbs down to continue troubleshooting
59
- # thumbs_down.click(fn=feedback_negative, inputs=[chat_history], outputs=chatbot)
 
60
  app.queue().launch()
61
 
62
- # %%
 
1
 
2
  #%%
3
 
 
 
 
4
  import gradio as gr
 
5
  from chat_logic.chat_stream import chatbot_interface, feedback_positive, feedback_negative
6
  from ui.custom_css import custom_css
7
 
8
  def interface_init():
9
+ """
10
+ Initialize the Gradio interface for the Repair Assistant chatbot.
11
+
12
+ """
13
 
14
  logo_path = "./images/logo.png"
15
 
 
20
  gr.Markdown("### Repair Assistant - Fix smarter with AI")
21
  gr.Markdown("State your repair topic, select your response style and start chatting.")
22
 
 
 
 
 
 
 
23
  # Chat interface & state
24
  chat_history = gr.State([])
25
  chatbot = gr.Chatbot()
26
  user_input = gr.Textbox(placeholder="What would you like to repair? Please name make, model and problem.")
27
+ response_type = gr.Radio(["Simple Language", "Technical", "Homer Simpson Language", "Sarcasm"], label="Answer Style")
28
  submit_btn = gr.Button("Submit", elem_classes="submit-button")
29
 
30
+ submit_btn.click(fn=chatbot_interface, inputs=[chatbot, user_input, response_type], outputs=chatbot)
31
+ user_input.submit(chatbot_interface, [chatbot, user_input, response_type], chatbot)
 
 
 
 
 
 
32
 
33
  # "Did the repair work?" label
34
  gr.Markdown("**Did the repair work?**")
35
 
36
+ # Feedback buttons (not functional yet)
37
  with gr.Row(elem_classes="feedback-buttons"):
38
  thumbs_up = gr.Button("👍 Yes")
39
  thumbs_down = gr.Button("👎 No")
40
 
 
 
 
41
  # Connect thumbs up to success message (stops chat)
42
+ thumbs_up.click(fn=feedback_positive, inputs=[chat_history], outputs=chatbot)
43
 
44
  # Connect thumbs down to continue troubleshooting
45
+ thumbs_down.click(fn=feedback_negative, inputs=[chat_history], outputs=chatbot)
46
+
47
  app.queue().launch()
48