Update app.py
Browse files
app.py
CHANGED
|
@@ -91,14 +91,11 @@ def generate_chunked_response(model, prompt, max_tokens=500, max_chunks=5):
|
|
| 91 |
def response(database, model, question):
    """Answer *question* using documents retrieved from *database*.

    Fills the module-level ``prompt`` template with the retrieved context
    and the question, then delegates text generation to
    ``generate_chunked_response``.

    Args:
        database: Vector store exposing ``as_retriever()``.
        model: Language model handle passed through to the generator.
        question: User question to answer.

    Returns:
        The raw generated answer string.
    """
    template = ChatPromptTemplate.from_template(prompt)
    retriever = database.as_retriever()
    # Fetch the documents relevant to the question and flatten them
    # into a single newline-separated context string.
    docs = retriever.get_relevant_documents(question)
    joined_context = "\n".join(doc.page_content for doc in docs)
    filled_prompt = template.format(context=joined_context, question=question)
    return generate_chunked_response(model, filled_prompt)
|
| 102 |
|
| 103 |
def update_vectors(files, use_recursive_splitter):
|
| 104 |
if not files:
|
|
|
|
| 91 |
def response(database, model, question):
    """Answer *question* using documents retrieved from *database*.

    Fills the module-level ``prompt`` template with the retrieved context
    and the question, generates text via ``generate_chunked_response``,
    and returns only the portion after the last ``"Question:"`` marker so
    the echoed prompt is stripped from the output.

    Args:
        database: Vector store exposing ``as_retriever()``.
        model: Language model handle passed through to the generator.
        question: User question to answer.

    Returns:
        The answer text with any echoed prompt prefix removed.
    """
    template = ChatPromptTemplate.from_template(prompt)
    retriever = database.as_retriever()
    # Fetch the documents relevant to the question and flatten them
    # into a single newline-separated context string.
    docs = retriever.get_relevant_documents(question)
    joined_context = "\n".join(doc.page_content for doc in docs)
    filled_prompt = template.format(context=joined_context, question=question)
    raw_answer = generate_chunked_response(model, filled_prompt)
    # Keep only the text after the final "Question:" echo, if any.
    return raw_answer.split("Question:")[-1].strip()
|
| 99 |
|
| 100 |
def update_vectors(files, use_recursive_splitter):
|
| 101 |
if not files:
|