jeonghin committed on
Commit
f155f58
·
verified ·
1 Parent(s): 1797043

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -5
app.py CHANGED
@@ -91,12 +91,17 @@ def get_conversation_chain(vectorstore):
91
 
92
  # Define a strict prompt template that makes the model answer only based on the document
93
  prompt_template = """
94
- You are a helpful assistant. Answer the question using only the provided document content.
95
- If the answer is not in the document, simply respond with "I cannot provide an answer based on the document.".
96
- Question: {question}
97
- """
 
 
 
98
 
99
- prompt = PromptTemplate(input_variables=["question"], template=prompt_template)
 
 
100
 
101
  try:
102
  llm = ChatOpenAI(model_name="gpt-4o")
@@ -109,6 +114,7 @@ def get_conversation_chain(vectorstore):
109
  memory=memory,
110
  return_source_documents=True,
111
  combine_docs_chain_kwargs={"prompt": prompt},
 
112
  )
113
  return conversation_chain
114
  except Exception as e:
@@ -241,6 +247,11 @@ def chat(slug, user_id):
241
  Restricts chat based on user group and chat count.
242
  """
243
 
 
 
 
 
 
244
  text_chunks = get_text_chunks(get_pdf_text(slug))
245
  vectorstore = get_vectorstore(text_chunks)
246
  st.session_state.conversation = get_conversation_chain(vectorstore)
 
91
 
92
  # Define a strict prompt template that makes the model answer only based on the document
93
  prompt_template = """
94
+ You are a helpful assistant. Use the following document context to answer the question.
95
+ If the answer is not in the document, simply respond with "I cannot provide an answer based on the document."
96
+
97
+ Document: {context}
98
+
99
+ Question: {question}
100
+ """
101
 
102
+ prompt = PromptTemplate(
103
+ input_variables=["context", "question"], template=prompt_template
104
+ )
105
 
106
  try:
107
  llm = ChatOpenAI(model_name="gpt-4o")
 
114
  memory=memory,
115
  return_source_documents=True,
116
  combine_docs_chain_kwargs={"prompt": prompt},
117
+ document_variable_name="context", # Specify the variable name for the document context
118
  )
119
  return conversation_chain
120
  except Exception as e:
 
247
  Restricts chat based on user group and chat count.
248
  """
249
 
250
+ # Show the user instruction at the top of the chat interface
251
+ st.write(
252
+ "**Please note:** Due to processing limitations, the chat may not fully comprehend the whole document."
253
+ )
254
+
255
  text_chunks = get_text_chunks(get_pdf_text(slug))
256
  vectorstore = get_vectorstore(text_chunks)
257
  st.session_state.conversation = get_conversation_chain(vectorstore)