Spaces:
Runtime error
Runtime error
Updating UI
Browse files
app.py
CHANGED
|
@@ -25,16 +25,15 @@ chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
|
|
| 25 |
|
| 26 |
def pull_relevant_info(query, top_k=3):
|
| 27 |
query_embedding = model.encode(query, convert_to_tensor=True)
|
| 28 |
-
|
| 29 |
|
| 30 |
-
|
| 31 |
|
| 32 |
-
similarities = torch.matmul(
|
| 33 |
|
| 34 |
top_indices = torch.topk(similarities, k=top_k).indices.cpu().numpy()
|
| 35 |
|
| 36 |
relevant_info = "\n\n".join([cleaned_chunks[i] for i in top_indices])
|
| 37 |
-
|
| 38 |
return relevant_info
|
| 39 |
|
| 40 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", provider="auto")
|
|
@@ -42,7 +41,7 @@ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", provider="auto")
|
|
| 42 |
def respond(message, history):
|
| 43 |
|
| 44 |
info = pull_relevant_info(message, top_k=3)
|
| 45 |
-
system_message = (f"You are a friendly chatbot. Use the following information to help answer the user's question:\n{info}\n")
|
| 46 |
messages = [{"role": "system", "content": system_message}]
|
| 47 |
|
| 48 |
if history:
|
|
@@ -62,8 +61,35 @@ def respond(message, history):
|
|
| 62 |
|
| 63 |
yield response
|
| 64 |
|
| 65 |
-
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
chatbot.launch()
|
| 69 |
|
|
|
|
| 25 |
|
| 26 |
def pull_relevant_info(query, top_k=3):
    """Return the `top_k` document chunks most similar to `query`.

    Similarity is cosine similarity: the query embedding and the
    pre-computed `chunk_embeddings` are both L2-normalized before the
    dot product, so `similarities` is in [-1, 1].

    Args:
        query: free-text user question to embed with the module-level `model`.
        top_k: maximum number of chunks to retrieve (clamped to the number
            of available chunks).

    Returns:
        The selected chunks from `cleaned_chunks`, joined by blank lines.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    query_embedding = query_embedding / query_embedding.norm()
    norm_chunk_embeddings = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(norm_chunk_embeddings, query_embedding)
    # Bug fix: torch.topk raises RuntimeError if k exceeds the number of
    # chunks, so clamp k to the corpus size.
    k = min(top_k, similarities.shape[0])
    top_indices = torch.topk(similarities, k=k).indices.cpu().numpy()
    relevant_info = "\n\n".join(cleaned_chunks[i] for i in top_indices)
    return relevant_info
|
| 38 |
|
| 39 |
# Chat backend: hosted zephyr-7b-beta; provider="auto" lets the HF hub pick
# an available inference provider at call time.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", provider="auto")
|
|
|
|
| 41 |
def respond(message, history):
|
| 42 |
|
| 43 |
info = pull_relevant_info(message, top_k=3)
|
| 44 |
+
system_message = (f"You are a friendly chatbot. Use the following information to help answer the user's question:\n\n{info}\n\n")
|
| 45 |
messages = [{"role": "system", "content": system_message}]
|
| 46 |
|
| 47 |
if history:
|
|
|
|
| 61 |
|
| 62 |
yield response
|
| 63 |
|
| 64 |
+
# UI copy rendered via gr.Markdown: the page heading and the list of
# tutoring topics shown above the chat interface.
title = "# Writing Tutor"
topics = """
### Meet your friendly writing tutor, an AI-driven partner to turn to when you need help writing an essay.
Feel free to ask me about the topics below:
- How to organize your essay
- What a thesis is and how to write it
- How to craft an introduction paragraph
- What your body paragraphs should accomplish
- Important things to include in your conclusion
- Examples of topic sentences
"""
|
| 75 |
+
|
| 76 |
+
def _answer_once(question_text):
    """Drain the streaming `respond` generator and return the final text.

    Adapter for the one-shot Textbox/Button flow, which needs a plain
    string result rather than a stream of partial responses.
    """
    final = ""
    for partial in respond(question_text, []):
        final = partial
    return final


# Page layout: heading + topic list in the top row, chat interface plus a
# one-shot question box in the bottom row.
# Bug fixes for the Space's "Runtime error":
#   - `gr.BLocks` -> `gr.Blocks` (AttributeError: module has no `BLocks`).
#   - the submit button was wired to `query_model`, which is not defined
#     anywhere in this file (NameError at click time); it now calls
#     `_answer_once`, built on the existing `respond` handler.
with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as chatbot:
    with gr.Row():
        with gr.Column():
            gr.Markdown(title)
            gr.Markdown(topics)
    with gr.Row():
        with gr.Column():
            gr.ChatInterface(
                fn=respond,
                type="messages"
            )
            question = gr.Textbox(label="Your question", placeholder="What do you want to ask about?")
            answer = gr.Textbox(label="Writing Tutor Response", placeholder="Writing Tutor will respond here...", interactive=False, lines=10)
            submit_button = gr.Button("Submit")
            submit_button.click(fn=_answer_once, inputs=question, outputs=answer)
|
| 93 |
|
| 94 |
chatbot.launch()
|
| 95 |
|