Spaces:
Sleeping
Sleeping
File size: 2,753 Bytes
5f0931f 635ce8c 5f0931f 635ce8c 5f0931f 635ce8c 5f0931f 635ce8c 85b19c3 5f0931f 635ce8c 5f0931f 635ce8c 5f0931f e0cd251 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage
from llama_index.core import PromptTemplate
import os
# Directory where the serialized vector index is persisted between runs.
PERSIST_DIR = "./storage"

if os.path.exists(PERSIST_DIR):
    # A persisted index exists: reload it instead of re-embedding documents.
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
else:
    # First run: read every file under ./data, build the vector index,
    # then persist it so later runs can skip the embedding step.
    docs = SimpleDirectoryReader("data").load_data()
    index = VectorStoreIndex.from_documents(docs)
    index.storage_context.persist(persist_dir=PERSIST_DIR)

# Query engine over the index; "compact" packs as much retrieved context
# as possible into each LLM call.
query_engine = index.as_query_engine(response_mode="compact")
# Prompt template for the response synthesizer. LlamaIndex substitutes
# {context_str} with the retrieved document text and {query_str} with the
# user's question. Sentences that were previously concatenated without
# separators now have explicit spaces/newlines, and the "lanugage" typo
# is fixed.
new_query_tmpl_str = (
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge, "
    "as an employee specialized in customer service, your main role is to "
    "assist users by answering questions based on the context. "
    "You are here to ensure that users receive accurate and helpful "
    "responses to their inquiries, making their experience smooth and "
    "satisfactory. "
    "If you don't know something, tell the user that you don't have the "
    "information yet and that it will be updated later.\n"
    "Your answer should follow this format.\n"
    "-------------------------\n"
    "In this first section, you will summarize and reflect your "
    "understanding of the user's question. This step ensures you grasp "
    "exactly what the user is asking or seeking, preparing to address it "
    "appropriately in your response.\n"
    "In this section, you will provide a detailed answer to the user's "
    "question. This part includes relevant information, solutions, or "
    "suggestions related to the user's query, aiming to deliver the "
    "necessary insights in a clear and understandable manner.\n"
    "In this final section, you'll ask if the answer was satisfactory or "
    "if there's a need for further information or clarification. This is "
    "to ensure the quality of your response and offer an opportunity to "
    "address any additional queries the user might have.\n"
    "-------------------------\n"
    "The answer must be in the same language as the query.\n"
    "Query: {query_str}\n"
    "Answer: "
)
# Install the customized QA template on the query engine's response
# synthesizer, replacing the default text_qa_template.
# (Fixed the misspelled local variable "new_qeury_tmpl".)
new_query_tmpl = PromptTemplate(new_query_tmpl_str)
query_engine.update_prompts(
    {"response_synthesizer:text_qa_template": new_query_tmpl}
)
def predict(user_prompt: str) -> str:
    """Answer a user question via the RAG query engine.

    Args:
        user_prompt: The user's question as free text.

    Returns:
        The synthesized answer as a plain string.
    """
    response = query_engine.query(user_prompt)
    # query() returns a llama-index Response object, not a str; coerce it
    # so the return value matches the annotation and Gradio's "text"
    # output component renders the answer text rather than a repr.
    return str(response)
# Build and launch the Gradio UI: a single text box in, the chatbot's
# text answer out.
demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="verychat customer service",
    description="CS with ChatGPT3.5 and llama-index",
)
demo.launch()
|