Update app.py
app.py
CHANGED
@@ -64,9 +64,15 @@ def answer_question(question, documents):
     context = "\n\n".join([doc.page_content for doc in documents])
     full_context = f"{context}"
     prompt = ChatPromptTemplate.from_template(template)
-
-    return chain.invoke({"question": question, "context": full_context})
+
+    # Use the prompt and send it directly to the Hugging Face model
+    question_with_context = prompt.format(question=question, context=full_context)
+
+    # Use the client (InferenceClient) to get a response
+    response = client.query(question_with_context)
+
+    return response["generated_text"]  # Assuming the response is in "generated_text"
 
 
 # Streamlit file uploader for PDF
 uploaded_file = st.file_uploader(
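For reference, here is a minimal runnable sketch of the rewritten answer_question, assuming client is a huggingface_hub.InferenceClient and that template is defined earlier in app.py (neither the client setup nor the template text appears in this hunk, so both are placeholders below). One caveat: InferenceClient does not expose a query method, so the sketch calls its text_generation method instead, which returns the completion as a plain string rather than a {"generated_text": ...} dict.

# Hedged sketch only: the model id and template text are placeholders,
# not taken from this commit.
from huggingface_hub import InferenceClient
from langchain_core.prompts import ChatPromptTemplate

client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")  # placeholder model id
template = "Answer using only this context:\n\n{context}\n\nQuestion: {question}"

def answer_question(question, documents):
    # Concatenate the retrieved document chunks into one context string.
    context = "\n\n".join(doc.page_content for doc in documents)
    prompt = ChatPromptTemplate.from_template(template)

    # Render the template to plain text, as the commit does with prompt.format(...).
    question_with_context = prompt.format(question=question, context=context)

    # text_generation returns the completion as a plain string, so no
    # response["generated_text"] indexing is needed.
    return client.text_generation(question_with_context, max_new_tokens=512)

Since full_context = f"{context}" in the diff is just a copy of context, the sketch folds it away.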