kdevoe committed on
Commit
2fcee0d
·
verified ·
1 Parent(s): 7c36c9f

Integrating chat into LLM response

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -36,12 +36,15 @@ print(result['result'])
36
 
37
 
38
  # Streamed response emulator
39
- def response_generator():
40
- response = random.choice(
41
- [
42
- result['result']
43
- ]
44
- )
 
 
 
45
  for word in response.split():
46
  yield word + " "
47
  time.sleep(0.05)
@@ -68,6 +71,6 @@ if prompt := st.chat_input("What is up?"):
68
 
69
  # Display assistant response in chat message container
70
  with st.chat_message("assistant"):
71
- response = st.write_stream(response_generator())
72
  # Add assistant response to chat history
73
  st.session_state.messages.append({"role": "assistant", "content": response})
 
36
 
37
 
38
  # Streamed response emulator
39
+ def response_generator(prompt):
40
+ # response = random.choice(
41
+ # [
42
+ # result['result']
43
+ # ]
44
+ # )
45
+
46
+ response = qa_chain({"query": prompt})['result']
47
+
48
  for word in response.split():
49
  yield word + " "
50
  time.sleep(0.05)
 
71
 
72
  # Display assistant response in chat message container
73
  with st.chat_message("assistant"):
74
+ response = st.write_stream(response_generator(prompt))
75
  # Add assistant response to chat history
76
  st.session_state.messages.append({"role": "assistant", "content": response})