debugging
app.py CHANGED
@@ -4,6 +4,7 @@ from huggingface_hub import InferenceClient
 # import lines go at the top: any libraries I need to import go up here ^^
 from sentence_transformers import SentenceTransformer
 import torch
+
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 # Step 1: Load the knowledge base
@@ -22,9 +23,6 @@ def preprocess_text(text):
 cleaned_chunks = preprocess_text(skincare_text)
 
 # Step 3: Convert chunks into embeddings
-from sentence_transformers import SentenceTransformer
-import torch
-
 model = SentenceTransformer('all-MiniLM-L6-v2')
 
 def create_embeddings(text_chunks):
@@ -46,7 +44,7 @@ def get_top_chunks(query, chunk_embeddings, text_chunks, top_k=3):
 # Step 5: Test the workflow with sample queries
 queries = [
     "Consistent skincare routine",
-    "Applying sunscreen daily",
+    "Applying sunscreen daily",
     "Choosing products that match your skin type"
 ]
 
@@ -57,19 +55,16 @@ for q in queries:
     print(f"Result {idx}: {res}")
 
 def respond(message, history):
-
-    top_results = get_top_chunks(question, chunk_embeddings, cleaned_chunks)
+    top_results = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    print(top_results)
-
-    messages = [{"role": "system", "content": "You are a friendly chatbot. You give people advice about skincare. Base your response on the following information {top_results}"}]
-
-    if history:
+
+    messages = [{"role": "system", "content": f"You are a friendly chatbot. You give people advice about skincare. Base your response on the following information: {top_results}"}]
+
+    if history:
         messages.extend(history)
-
     messages.append({"role": "user", "content": message})
-
-    response = client.chat_completion(messages, max_tokens
-
+
+    response = client.chat_completion(messages, max_tokens=100)
     return response['choices'][0]['message']['content'].strip()
 
 def echo(message, history):
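
For context: the hunk headers above reference create_embeddings and get_top_chunks, whose bodies sit outside this diff. Below is a minimal sketch of what those helpers plausibly look like, assuming cosine-similarity retrieval via sentence-transformers' util module; the real bodies in app.py may differ.

    from sentence_transformers import SentenceTransformer, util
    import torch

    model = SentenceTransformer('all-MiniLM-L6-v2')

    def create_embeddings(text_chunks):
        # Encode every chunk into one tensor of shape (num_chunks, embedding_dim)
        return model.encode(text_chunks, convert_to_tensor=True)

    def get_top_chunks(query, chunk_embeddings, text_chunks, top_k=3):
        # Embed the query, score it against each chunk, return the top_k matches
        query_embedding = model.encode(query, convert_to_tensor=True)
        scores = util.cos_sim(query_embedding, chunk_embeddings)[0]
        top_indices = torch.topk(scores, k=min(top_k, len(text_chunks))).indices.tolist()
        return [text_chunks[i] for i in top_indices]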
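
Also not shown in this diff is how respond reaches the UI. Given the respond(message, history) signature and the way history is spliced straight into messages, the Space presumably mounts it on Gradio's chat component. A hedged sketch of that wiring, assuming a recent Gradio version where ChatInterface accepts type="messages"; the title string is an invented placeholder.

    import gradio as gr

    # type="messages" makes Gradio pass history as role/content dicts,
    # which is what messages.extend(history) in respond() expects
    demo = gr.ChatInterface(fn=respond, type="messages", title="Skincare Chatbot")

    if __name__ == "__main__":
        demo.launch()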