Spaces: RepAIr — Sleeping

Commit: added whole program

Files changed:
- README.md +3 -3
- chat_logic/__pycache__/chat_stream.cpython-311.pyc +0 -0
- chat_logic/__pycache__/prompts.cpython-311.pyc +0 -0
- chat_logic/chat_stream.py +126 -0
- chat_logic/prompts.py +20 -0
- helper_functions/__pycache__/checkpoint.cpython-311.pyc +0 -0
- helper_functions/__pycache__/llm_base_client.cpython-311.pyc +0 -0
- helper_functions/checkpoint.py +12 -0
- helper_functions/llm_base_client.py +12 -0
- images/.gitkeep +0 -0
- images/logo.png +0 -0
- main.py +20 -0
- rag/__pycache__/ifixit_document_retrieval.cpython-311.pyc +0 -0
- rag/__pycache__/vectorization_functions.cpython-311.pyc +0 -0
- rag/ifixit_document_retrieval.py +74 -0
- rag/vectorization_functions.py +57 -0
- requirements.txt +27 -0
- ui/__pycache__/custom_css.cpython-311.pyc +0 -0
- ui/__pycache__/interface_design.cpython-311.pyc +0 -0
- ui/custom_css.py +30 -0
- ui/interface_design.py +62 -0
README.md
CHANGED
@@ -1,13 +1,13 @@
 ---
 title: RepAIr
-emoji:
+emoji: 🛠️
 colorFrom: green
 colorTo: green
 sdk: gradio
 sdk_version: 5.23.3
-app_file:
+app_file: main.py
 pinned: false
-short_description: Our help bot using iFixit
+short_description: Our help bot using iFixit! :)
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
chat_logic/__pycache__/chat_stream.cpython-311.pyc
ADDED
Binary file (3.38 kB)

chat_logic/__pycache__/prompts.cpython-311.pyc
ADDED
Binary file (941 Bytes)
chat_logic/chat_stream.py
ADDED
@@ -0,0 +1,126 @@
#%%
# processing functions
from rag.vectorization_functions import split_documents, create_embedding_vector_db, query_vector_db
# load iFixit info
from rag.ifixit_document_retrieval import load_ifixit_guides
# model
from helper_functions.llm_base_client import llm_base_client_init


# NOTE: superseded by the RAG-enabled chatbot_interface defined further below
def chatbot_interface(history, user_query):
    """
    Defines the LLM model, replays the chat history and answers the user.
    """
    if not user_query.strip():
        return history + [(user_query, "Hey, I'd love to help you! What can I do for you?")]

    messages = [{"role": "system",
                 "content": """You are a helpful assistant
                 that helps users with the repair of their devices. Ask them if they need help with a repair.
                 If they do, ask them to provide the device name and model."""}]

    if history:
        for user_msg, bot_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": user_query})
    print(messages)

    client = llm_base_client_init()

    chat_completion = client.chat.completions.create(
        messages=messages,
        model="llama3-8b-8192",
        temperature=0.3
    )

    return history + [(user_query, chat_completion.choices[0].message.content)]

#%%
# processing functions
from rag.vectorization_functions import split_documents, create_embedding_vector_db, query_vector_db
# load iFixit info
from rag.ifixit_document_retrieval import load_ifixit_guides
# model
from helper_functions.llm_base_client import llm_base_client_init
from chat_logic.prompts import load_prompts

def chatbot_answer(user_query, memory=None, context="", prompt="default", modelname="llama3-8b-8192", temp=0.3):
    """
    Replays the chat history and queries the LLM for an answer.
    """
    client = llm_base_client_init()
    answer_prompt = load_prompts(prompt, context)
    messages = [{"role": "system",
                 "content": answer_prompt}]

    if memory:
        for user_msg, bot_msg in memory:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": user_query})

    # call the LLM with the entire chat history in order to get an answer
    chat_completion = client.chat.completions.create(
        messages=messages,
        model=modelname,
        temperature=temp
    )
    return chat_completion


def chatbot_interface(history, user_query):
    """
    Entry point for the Gradio chat: builds the RAG context and answers the user.
    """
    # load guides, create embeddings and return an answer for the first query
    if len(history) == 0:
        data = load_ifixit_guides(user_query, debug=True)
        chunks = split_documents(data)
        global vector_db
        vector_db = create_embedding_vector_db(chunks)
        context = query_vector_db(user_query, vector_db)
        message_content = chatbot_answer(user_query, history, context, prompt="repair_guide")
        answer = history + [(user_query, message_content.choices[0].message.content)]
        return answer

    # answer follow-up questions about the guide
    else:
        context = query_vector_db(user_query, vector_db)
        message_content = chatbot_answer(user_query, history, context, prompt="repair_helper")
        answer = history + [(user_query, message_content.choices[0].message.content)]
        return answer


# Not implemented yet:
def answer_style(history, user_query, response_type):
    response = f"Suggested repair steps for: {user_query}\n\n"
    if response_type == "Simple Language":
        response += "Please provide a clear and easy-to-understand explanation."
    elif response_type == "Technical":
        response += "Provide a detailed technical breakdown of the repair process."

    history.append((user_query, response))  # append to chat history
    return history


# Feedback function for thumbs up (chat ends with success message)
def feedback_positive(history):
    history.append((None, "🎉 Great! We're happy to hear that your repair was successful! If you need help in the future, feel free to ask."))
    return history

# Feedback function for thumbs down (chat continues)
def feedback_negative(history):
    history.append((None, "I'm sorry to hear that. Could you describe the issue further? Maybe we can find another solution."))
    return history
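
For orientation, a minimal sketch of how this module composes outside the UI — the queries are hypothetical, and a valid GROQ_API_KEY plus the packages from requirements.txt are assumed:

# sketch: drive chatbot_interface directly, the same way the Gradio UI would
from chat_logic.chat_stream import chatbot_interface

history = []  # Gradio-style list of (user, assistant) tuples
# the first turn triggers guide retrieval and vector DB creation
history = chatbot_interface(history, "iPhone 11 screen replacement")
# later turns reuse the global vector_db for follow-up questions
history = chatbot_interface(history, "Which screwdriver do I need?")
print(history[-1][1])  # last assistant answer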
chat_logic/prompts.py
ADDED
@@ -0,0 +1,20 @@
def load_prompts(prompt, context=""):
    """
    Load the prompts from a file or define them in the code.
    """
    # You can load these prompts

    if prompt == "default":
        return """You are a helpful assistant that helps users with the repair of their devices.
        Ask them if they need help with a repair.
        If they do, ask them to provide the device name and model."""

    if prompt == "repair_guide":
        return f"List repair steps for the problem. Use the following context:\n{context}"

    if prompt == "repair_helper":
        return f"Answer the user's question about the guide. Use the following context:\n{context}"

    raise ValueError(f"Unknown prompt name: {prompt}")  # avoid silently returning None
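
A quick usage sketch (the context string is made up):

from chat_logic.prompts import load_prompts

system_prompt = load_prompts("repair_guide", context="Step 1: Remove the two pentalobe screws...")
print(system_prompt)
# -> "List repair steps for the problem. Use the following context: ..."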
helper_functions/__pycache__/checkpoint.cpython-311.pyc
ADDED
Binary file (876 Bytes)

helper_functions/__pycache__/llm_base_client.cpython-311.pyc
ADDED
Binary file (661 Bytes)
helper_functions/checkpoint.py
ADDED
@@ -0,0 +1,12 @@
import os

def checkpoint(path):
    """Prints a message indicating the successful execution of a script.
    By default it shows the name of the script from which this function is called.
    Alternatively, you can pass a string to display a custom message."""

    script_name = os.path.basename(path)  # extract just the filename

    print("-" * 30)
    print(f"{script_name} successfully executed!")
    print("-" * 30)
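
Both documented uses work because os.path.basename() returns a string unchanged when it contains no path separator — a small sketch:

from helper_functions.checkpoint import checkpoint

checkpoint(__file__)          # -> "main.py successfully executed!"
checkpoint("data prep done")  # -> "data prep done successfully executed!"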
helper_functions/llm_base_client.py
ADDED
@@ -0,0 +1,12 @@
#%%
import os
from dotenv import load_dotenv
from groq import Groq

def llm_base_client_init():
    """Initializes a Groq client using the GROQ_API_KEY from the environment (or a .env file)."""
    load_dotenv()
    groq_key = os.getenv('GROQ_API_KEY')
    client = Groq(api_key=groq_key)

    return client
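
This assumes a .env file (or Space secret) of the form GROQ_API_KEY=... is available at runtime. A minimal smoke test of the client, mirroring the completion call used elsewhere in the repo, with a made-up question:

from helper_functions.llm_base_client import llm_base_client_init

client = llm_base_client_init()
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say hello in one word."}],
    model="llama3-8b-8192",
)
print(completion.choices[0].message.content)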
images/.gitkeep
ADDED
File without changes

images/logo.png
ADDED
main.py
ADDED
@@ -0,0 +1,20 @@
#%%

# import checkpoint message
from helper_functions.checkpoint import checkpoint

# get interface design
from ui.interface_design import interface_init


def main():
    interface_init()


if __name__ == "__main__":
    main()

    checkpoint(__file__)


# %%
rag/__pycache__/ifixit_document_retrieval.cpython-311.pyc
ADDED
Binary file (3.28 kB)

rag/__pycache__/vectorization_functions.cpython-311.pyc
ADDED
Binary file (2.14 kB)
rag/ifixit_document_retrieval.py
ADDED
@@ -0,0 +1,74 @@

from langchain_community.document_loaders import IFixitLoader
from helper_functions.llm_base_client import llm_base_client_init

# function for rewriting info into a search phrase
def write_searchphrase(search_info: str, debug: bool = False):
    """
    Uses the LLM to rewrite input into a structured searchphrase for iFixit searches.

    Args:
        search_info (str): The information to be turned into a searchphrase.
    """
    client = llm_base_client_init()

    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "system",
             "content": """Rewrite the following info into a structured searchphrase for iFixit,
             ensuring it includes in this order the device name, model and part that needs to be repaired.
             Return only the searchphrase, do not include any other words or comments.
             Capitalize the first letter of each word.
             The searchphrase should be a single sentence."""},
            {"role": "user", "content": search_info},
        ],
        model="llama3-8b-8192",
        temperature=0.1
    )
    search_phrase = chat_completion.choices[0].message.content
    if debug:
        print('Full searchphrase:', search_phrase)
    return search_phrase

# load guides from iFixit
def load_guides(search_phrase: str, debug: bool = False):
    """
    Load a guide from iFixit based on the search phrase.
    If no guide is found, iteratively remove the last word and retry.

    Args:
        search_phrase (str): The phrase to search for in iFixit guides.

    Returns:
        guides: The loaded guide data or None if no guide is found.
    """
    words = search_phrase.split()

    while words:
        query = " ".join(words)
        guides = IFixitLoader.load_suggestions(query, doc_type='guide')

        if guides:
            if debug:
                print('Used words:', words)
            return guides  # return results if found

        words.pop()  # remove the last word and retry

    print('No guides found')
    return None  # return None if no guide is found

def load_ifixit_guides(search_info: str, debug: bool = False):
    """
    Rewrites the search info into a searchphrase and loads guides from iFixit.

    Args:
        search_info (str): The information to be turned into a searchphrase.

    Returns:
        guides: The loaded guide data or None if no guide is found.
    """
    search_phrase = write_searchphrase(search_info, debug=debug)
    guides = load_guides(search_phrase, debug=debug)
    return guides
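
A sketch of the retrieval flow on its own, with a hypothetical search query (a Groq key and network access to the iFixit API are assumed):

from rag.ifixit_document_retrieval import load_ifixit_guides

guides = load_ifixit_guides("My MacBook Pro 2017 battery drains fast", debug=True)
print(f"{len(guides)} document(s) loaded" if guides else "nothing found")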
rag/vectorization_functions.py
ADDED
@@ -0,0 +1,57 @@
# vectorization functions
#%%
# General
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from helper_functions.llm_base_client import llm_base_client_init

def split_documents(documents, chunk_size=800, chunk_overlap=80):  # check chunk size and overlap for our purpose
    """
    Splits documents into chunks of the given size and overlap.
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap
    )
    chunks = text_splitter.split_documents(documents=documents)
    return chunks

def create_embedding_vector_db(chunks):  # later: db_name, target_directory="../vector_databases"
    """
    Uses the open-source embedding model HuggingFaceEmbeddings to create
    embeddings and stores them in a FAISS vector database, which allows
    for efficient similarity search.
    """
    # instantiate the embedding model (converts text to vectors; stick to it)
    embedding = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2'
    )
    # create the vector store (from_documents stores embeddings and keeps metadata)
    vector_db = FAISS.from_documents(
        documents=chunks,
        embedding=embedding
    )
    return vector_db  # optimize

# function to query the vector database
def query_vector_db(query, vector_db):
    # retrieve the k most similar chunks (nearest neighbors) via FAISS similarity search
    docs = vector_db.similarity_search(query, k=3)
    context = "\n".join([doc.page_content for doc in docs])

    return context

    # client = llm_base_client_init()
    # # Interact with Groq API
    # chat_completion = client.chat.completions.create(
    #     messages=[
    #         {"role": "system",
    #          "content": f"List repair steps for the Problem. Use the following context:\n{context}"},
    #         {"role": "user", "content": query},
    #     ],
    #     model="llama3-8b-8192",
    #     temperature=0.3  # optional: check best value!
    # )
    # return chat_completion.choices[0].message.content
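
Put together, the intended RAG pipeline looks roughly like this (the query is invented; model downloads and API access are assumed):

from rag.ifixit_document_retrieval import load_ifixit_guides
from rag.vectorization_functions import split_documents, create_embedding_vector_db, query_vector_db

query = "Nintendo Switch Joy-Con drift"          # hypothetical user query
docs = load_ifixit_guides(query)                 # fetch guides from iFixit
chunks = split_documents(docs)                   # 800-char chunks, 80 overlap
vector_db = create_embedding_vector_db(chunks)   # MiniLM embeddings in FAISS
context = query_vector_db(query, vector_db)      # top-3 chunks joined as context
print(context[:300])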
requirements.txt
ADDED
@@ -0,0 +1,27 @@
black==23.3.0
faiss-cpu==1.10.0
jupyterlab==4.2.5
langchain-community==0.3.20
langchain-groq==0.2.0
langchain-huggingface==0.1.0
langchain-openai==0.2.3
langchainhub==0.1.21
matplotlib==3.7.1
numpy==1.26.4
pandas==2.0.1
parsenvy==3.0.2
protobuf==4.23.3
pypdf==4.2.0
python-dotenv==1.0.1
seaborn==0.11.2
scikit-learn==1.2.2
statsmodels==0.14.0
pytest==7.4.0
testbook==0.4.2
gradio
ui/__pycache__/custom_css.cpython-311.pyc
ADDED
Binary file (999 Bytes)

ui/__pycache__/interface_design.cpython-311.pyc
ADDED
Binary file (2.91 kB)
ui/custom_css.py
ADDED
@@ -0,0 +1,30 @@

def custom_css():
    """Returns the UI styling as an HTML <style> block."""
    custom_css = """
    <style>
    .submit-button {
        background-color: #E69A8D !important; /* coral red */
        color: white !important;
        border: none;
        padding: 10px 20px;
        text-align: center;
        font-size: 16px;
        margin: 4px 2px;
        cursor: pointer;
        border-radius: 5px;
    }
    .submit-button:hover {
        background-color: #D17F73 !important;
    }
    .chat-container {
        max-height: 500px;
        overflow-y: auto;
    }
    .feedback-buttons {
        display: flex;
        gap: 10px;
        margin-top: 5px;
    }
    </style>
    """
    return custom_css
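
Gradio also accepts CSS directly on Blocks, which avoids shipping a raw <style> tag through gr.HTML — a possible variant, assuming the wrapper tags are stripped first:

import gradio as gr
from ui.custom_css import custom_css

# gr.Blocks(css=...) expects plain CSS, so strip the <style> wrapper
plain_css = custom_css().replace("<style>", "").replace("</style>", "")
with gr.Blocks(css=plain_css) as app:
    ...  # build the UI as in interface_init()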
ui/interface_design.py
ADDED
@@ -0,0 +1,62 @@

#%%


#NEW

import gradio as gr
import os
from chat_logic.chat_stream import chatbot_interface, feedback_positive, feedback_negative
from ui.custom_css import custom_css

def interface_init():

    logo_path = "./images/logo.png"

    # Gradio UI
    with gr.Blocks() as app:
        gr.Image(logo_path, elem_id="logo", show_label=False)
        gr.HTML(custom_css())  # insert custom CSS
        gr.Markdown("### Repair Assistant - Fix smarter with AI")
        gr.Markdown("State your repair topic, select your response style and start chatting.")

        # Input field
        #question = gr.Textbox(label="Your Question", placeholder="What would you like to repair? Please name make, model and problem.")

        # Submit button
        #submit_button = gr.Button("Submit", elem_classes="submit-button")

        # Chat interface & state
        chat_history = gr.State([])
        chatbot = gr.Chatbot()
        user_input = gr.Textbox(placeholder="What would you like to repair? Please name make, model and problem.")
        submit_btn = gr.Button("Submit", elem_classes="submit-button")

        submit_btn.click(chatbot_interface, [chatbot, user_input], chatbot)
        user_input.submit(chatbot_interface, [chatbot, user_input], chatbot)

        # Response style selection
        response_type = gr.Radio(["Simple Language", "Technical"], label="Answer Style")

        # Connect the start button to chat initialization
        #submit_button.click(fn=start_chat, inputs=[question, response_type], outputs=[chat_history, chatbot, chatbot])

        # "Did the repair work?" label
        gr.Markdown("**Did the repair work?**")

        # Feedback buttons
        with gr.Row(elem_classes="feedback-buttons"):
            thumbs_up = gr.Button("👍 Yes")
            thumbs_down = gr.Button("👎 No")

        # Connect submit button to chatbot function
        #submit_button.click(fn=repair_assistant, inputs=[chat_history, question, response_type], outputs=chatbot)

        # Connect thumbs up to success message (stops chat)
        #thumbs_up.click(fn=feedback_positive, inputs=[chat_history], outputs=chatbot)

        # Connect thumbs down to continue troubleshooting
        #thumbs_down.click(fn=feedback_negative, inputs=[chat_history], outputs=chatbot)
    app.queue().launch()

# %%