Update app.py
app.py CHANGED

@@ -20,25 +20,22 @@ HF_MODEL1 = 'HuggingFaceH4/zephyr-7b-beta'
 vector_path = 'faiss_index'
 hf_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
 
-
-
-
-#
-if os.path.exists("faiss_index"):
-    vectordb = FAISS.load_local(vector_path, embedding_model, allow_dangerous_deserialization=True)
-else:
-    raise FileNotFoundError("FAISS index not found in Space. Please upload it to faiss_index/")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature
+def respond(message, history #,
+    #system_message,
+    #max_tokens,
+    #temperature,
     #top_p
 ):
-
+
+    # Initialize your embedding model
+    embedding_model = HuggingFaceEmbeddings(model_name=EMB_MODEL1)
+
+    # Load FAISS from relative path
+    if os.path.exists("faiss_index"):
+        vectordb = FAISS.load_local(vector_path, embedding_model, allow_dangerous_deserialization=True)
+    else:
+        raise FileNotFoundError("FAISS index not found in Space. Please upload it to faiss_index/")
+
     # define retriever object
     retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": 5})
 
@@ -46,32 +43,34 @@ def respond(
     llm = HuggingFaceHub(
         repo_id=MISTRAL_MODEL1,
         huggingfacehub_api_token=hf_token,
-        model_kwargs={"temperature":
+        model_kwargs={"temperature": 0.5, "max_new_tokens": 512}
     )
+
     # create a RAG pipeline
     qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
     #generate results
     result = qa_chain.invoke(message)
+    responce = result['result']
 
-    yield
+    yield responce
 
 
 demo = gr.ChatInterface(
     respond,
     type="messages",
-    autofocus=False
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=128, maximum=1024, value=512, step=128, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
+    autofocus=False #,
+    #additional_inputs=[
+    #    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+    #    gr.Slider(minimum=128, maximum=1024, value=512, step=128, label="Max new tokens"),
+    #    gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
+    #    gr.Slider(
+    #        minimum=0.1,
+    #        maximum=1.0,
+    #        value=0.95,
+    #        step=0.05,
+    #        label="Top-p (nucleus sampling)",
+    #    ),
+    #],
 )
 
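The `else` branch makes the deployment contract explicit: the Space must ship with a prebuilt `faiss_index/` folder. For reference, a minimal sketch of how that folder could be built locally and saved, assuming the same `langchain_community` stack as `app.py`; the corpus file `docs.txt` and the `EMB_MODEL1` value here are placeholder assumptions:

```python
# Sketch: build and save the faiss_index/ folder that app.py loads.
# Assumptions: docs.txt is a placeholder corpus; EMB_MODEL1 must match app.py.
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter

EMB_MODEL1 = "sentence-transformers/all-MiniLM-L6-v2"  # assumption: app's embedding model

docs = TextLoader("docs.txt").load()  # placeholder source document
chunks = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=100
).split_documents(docs)

embedding_model = HuggingFaceEmbeddings(model_name=EMB_MODEL1)
vectordb = FAISS.from_documents(chunks, embedding_model)
vectordb.save_local("faiss_index")  # upload this folder to the Space root
```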
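`RetrievalQA` returns a dict keyed by output name rather than a bare string, which is what the added `result['result']` line unpacks before `yield`. A short sketch of that result shape, with the optional `return_source_documents=True` flag added; `llm`, `retriever`, and `message` are assumed to exist as in the diff:

```python
# Sketch of the dict returned by RetrievalQA; llm and retriever as built in respond().
from langchain.chains import RetrievalQA

qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    return_source_documents=True,  # optional: also surface the retrieved chunks
)

result = qa_chain.invoke(message)     # a single string maps to the "query" input key
answer = result["result"]             # the generated answer string
sources = result["source_documents"]  # the k=5 Documents the retriever returned
```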
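Note that `HuggingFaceHub` is LangChain's legacy wrapper and is deprecated in newer releases in favor of `HuggingFaceEndpoint`. A hedged sketch of the equivalent call under that newer API; the `MISTRAL_MODEL1` value shown is an assumption, not taken from this diff:

```python
# Sketch: the newer LangChain equivalent of the HuggingFaceHub call in the diff.
# Assumes the langchain-huggingface package is installed.
import os

from langchain_huggingface import HuggingFaceEndpoint

MISTRAL_MODEL1 = "mistralai/Mistral-7B-Instruct-v0.2"  # assumption: not shown in this diff
hf_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]

llm = HuggingFaceEndpoint(
    repo_id=MISTRAL_MODEL1,
    huggingfacehub_api_token=hf_token,
    temperature=0.5,      # mirrors model_kwargs in the diff
    max_new_tokens=512,
)
```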