feat: implement streaming responses using AsyncOpenAI
app.py CHANGED
@@ -2,7 +2,7 @@
 
 # OpenAI Chat completion
 import os
-from openai import AsyncOpenAI
+from openai import AsyncOpenAI  # importing openai for API usage
 import chainlit as cl  # importing chainlit for our app
 from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
 from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
@@ -16,7 +16,7 @@ load_dotenv()
 logger.info("Environment variables loaded")
 
 # Initialize OpenAI client
-client =
+client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
 # Initialize managers
 config = ConfigManager()
@@ -56,23 +56,28 @@ async def main(message: cl.Message):
     model_config = config.get_model_config()
     logger.debug("Retrieved model configuration")
 
-    #
-    response = await client.chat.completions.create(
+    # Create a message element for streaming
+    msg = cl.Message(content="")
+    await msg.send()
+
+    # Stream the response
+    stream = await client.chat.completions.create(
         model=model_config["name"],
         messages=messages,
         temperature=model_config["temperature"],
         max_tokens=model_config["max_tokens"],
         top_p=model_config["top_p"],
         frequency_penalty=model_config["frequency_penalty"],
-        presence_penalty=model_config["presence_penalty"]
+        presence_penalty=model_config["presence_penalty"],
+        stream=True
    )
-    logger.info("Received response from OpenAI")
 
-    #
-
-
-
-
+    # Process the stream
+    async for chunk in stream:
+        if chunk.choices[0].delta.content is not None:
+            await msg.stream_token(chunk.choices[0].delta.content)
+
+    logger.info("Completed streaming response")
 
 @cl.action_callback("select_aspect")
 async def on_action(action):
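For context, here is a minimal, self-contained sketch of the streaming pattern this commit adopts. It is an illustration rather than this repo's exact code: the model name and the single-message prompt are placeholders standing in for what app.py pulls from its ConfigManager and conversation history, and it assumes openai>=1.0, chainlit, and an OPENAI_API_KEY environment variable.

import os

import chainlit as cl
from openai import AsyncOpenAI

# Placeholder client setup; the commit reads the key the same way
client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))


@cl.on_message
async def main(message: cl.Message):
    # Send an empty message first so tokens can stream into it in the UI
    msg = cl.Message(content="")
    await msg.send()

    # stream=True makes the client return an async iterator of chunks
    # instead of a single completed response
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo",  # placeholder; app.py takes this from config
        messages=[{"role": "user", "content": message.content}],
        stream=True,
    )

    # Each chunk carries an incremental delta; content can be None
    # (e.g. role-only or finish chunks), so guard before forwarding it
    async for chunk in stream:
        token = chunk.choices[0].delta.content
        if token is not None:
            await msg.stream_token(token)

    # Mark the streamed message as complete in the UI
    await msg.update()

Run it with: chainlit run app.py. The two things that make this work are the stream=True flag, which turns the create() call into an async iterator of partial chunks, and the delta.content guard, since chunks that carry only a role or a finish reason have no content and the guard simply skips them.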