maximiliandauner committed on
Commit
4bfdd8c
·
1 Parent(s): af59843

added streaming

Browse files
Files changed (1) hide show
  1. app.py +19 -6
app.py CHANGED
@@ -6,6 +6,8 @@ from llama_index.llms import OpenAI
6
  from llama_index.text_splitter import TokenTextSplitter
7
 
8
  from theme import CustomTheme
 
 
9
 
10
  # create storage context
11
  storage_context = StorageContext.from_defaults(persist_dir="modulhandbuch")
@@ -67,12 +69,23 @@ submit_button = gr.Button(
67
  def response(message, history):
68
  if message == "":
69
  answer = default_text
70
- else:
71
- answer = str(query_engine.chat(message, chat_history=query_engine.chat_history))
72
- print("message", message)
73
- print("answer", answer)
74
- print("history", history)
75
- return answer
 
 
 
 
 
 
 
 
 
 
 
76
 
77
 
78
  def main():
 
6
  from llama_index.text_splitter import TokenTextSplitter
7
 
8
  from theme import CustomTheme
9
+ import time
10
+ import asyncio
11
 
12
  # create storage context
13
  storage_context = StorageContext.from_defaults(persist_dir="modulhandbuch")
 
69
  def response(message, history):
70
  if message == "":
71
  answer = default_text
72
+ return answer
73
+
74
+ loop = asyncio.new_event_loop()
75
+ asyncio.set_event_loop(loop)
76
+
77
+ answer = query_engine.stream_chat(message, chat_history=query_engine.chat_history)
78
+
79
+ text = ""
80
+ for token in answer.response_gen:
81
+ time.sleep(0.1)
82
+ text += token
83
+
84
+ yield text
85
+ #print("message", message)
86
+ #print("answer", answer)
87
+ #print("history", history)
88
+ #return answer
89
 
90
 
91
  def main():