kdevoe committed
Commit 9fdfa98 · verified · 1 Parent(s): 677a6da

Reverting back to version without memory

Files changed (1)
  app.py +8 -19
app.py CHANGED
@@ -28,28 +28,17 @@ qa_chain = RetrievalQA.from_chain_type(
     retriever=vectordb.as_retriever()
 )
 
-# Conversation memory
-from langchain.memory import ConversationBufferMemory
-memory = ConversationBufferMemory(
-    memory_key="chat_history",
-    return_messages=True
-)
+question = "production is broken how do I fix it?"
 
-from langchain.chains import ConversationalRetrievalChain
-retriever=vectordb.as_retriever()
-qa_memory = ConversationalRetrievalChain.from_llm(
-    llm,
-    retriever=vectordb.as_retriever(),
-    memory=memory
-)
+result = qa_chain({"query": question})
+
+print(result['result'])
 
 
 # Streamed response emulator
 def response_generator(prompt):
-
-    response = qa_memory({"question": prompt})['result']
-
-    # Fake streaming
+    response = qa_chain({"query": prompt})['result']
+
     for word in response.split():
         yield word + " "
         time.sleep(0.05)
@@ -76,7 +65,7 @@ if prompt := st.chat_input("What is up?"):
 
     # Display assistant response in chat message container
    with st.chat_message("assistant"):
-        response = qa_memory({"question": prompt})
-        st.write(response)
+        response = st.write_stream(response_generator(prompt))
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": response})
+
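For context, a minimal sketch of how the reverted app.py fits together after this commit: a plain RetrievalQA chain (no conversation memory) whose answer is fake-streamed into a Streamlit chat. The LLM and vector store setup shown here (ChatOpenAI, Chroma, the "db" persist directory, and the page title) are assumptions for illustration only; the real construction happens earlier in app.py and is not part of this diff.

import time

import streamlit as st
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI          # assumed LLM, not shown in the diff
from langchain.embeddings import OpenAIEmbeddings     # assumed embeddings, not shown in the diff
from langchain.vectorstores import Chroma             # assumed vector store, not shown in the diff

# Assumed setup: the actual app builds llm and vectordb before line 28 of app.py.
llm = ChatOpenAI(temperature=0)
vectordb = Chroma(persist_directory="db", embedding_function=OpenAIEmbeddings())

qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectordb.as_retriever()
)

# Streamed response emulator: RetrievalQA takes a "query" key and returns the
# answer under "result"; split it into words and yield them with a small delay.
def response_generator(prompt):
    response = qa_chain({"query": prompt})['result']

    for word in response.split():
        yield word + " "
        time.sleep(0.05)

st.title("Document Q&A")  # assumed title

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior turns so the chat history survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator(prompt))
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})

Note that st.write_stream returns the fully concatenated text, so the string appended to session state matches what was streamed to the screen.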