Commit ·
4bfdd8c
1
Parent(s): af59843
added streaming
Browse files
app.py
CHANGED
|
@@ -6,6 +6,8 @@ from llama_index.llms import OpenAI
|
|
| 6 |
from llama_index.text_splitter import TokenTextSplitter
|
| 7 |
|
| 8 |
from theme import CustomTheme
|
|
|
|
|
|
|
| 9 |
|
| 10 |
# create storage context
|
| 11 |
storage_context = StorageContext.from_defaults(persist_dir="modulhandbuch")
|
|
@@ -67,12 +69,23 @@ submit_button = gr.Button(
|
|
| 67 |
def response(message, history):
|
| 68 |
if message == "":
|
| 69 |
answer = default_text
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
|
| 78 |
def main():
|
|
|
|
| 6 |
from llama_index.text_splitter import TokenTextSplitter
|
| 7 |
|
| 8 |
from theme import CustomTheme
|
| 9 |
+
import time
|
| 10 |
+
import asyncio
|
| 11 |
|
| 12 |
# create storage context
|
| 13 |
storage_context = StorageContext.from_defaults(persist_dir="modulhandbuch")
|
|
|
|
| 69 |
def response(message, history):
    """Gradio streaming chat handler: yield the answer text as it grows.

    Parameters
    ----------
    message : str
        The user's chat message; an empty string means "show the default text".
    history : list
        Gradio-supplied chat history (unused here; the engine keeps its own
        history via ``query_engine.chat_history``).

    Yields
    ------
    str
        The accumulated answer so far, re-emitted after every new token so the
        UI updates live.
    """
    # Empty input: emit the canned default text once.
    # NOTE: this function contains `yield`, so it is a generator — a plain
    # `return answer` here would silently discard the value (Gradio iterates
    # the generator and would display nothing). Yield it, then stop.
    if message == "":
        yield default_text
        return

    # stream_chat drives async machinery under the hood; give this (Gradio
    # worker) thread its own event loop so it can run off the main thread.
    # NOTE(review): the loop is never closed — consider loop.close() cleanup
    # if handlers are called many times.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    answer = query_engine.stream_chat(message, chat_history=query_engine.chat_history)

    # Accumulate tokens and yield the partial text after each one.
    text = ""
    for token in answer.response_gen:
        time.sleep(0.1)  # deliberate pacing so streaming is visible in the UI
        text += token
        yield text
|
| 89 |
|
| 90 |
|
| 91 |
def main():
|