Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -52,20 +52,30 @@ if user_input:
|
|
| 52 |
|
| 53 |
# Prepare to get the response from Physics Master
|
| 54 |
st.write('Physics Master is thinking...')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
response = llm.create_chat_completion(
|
| 56 |
messages=st.session_state.messages,
|
| 57 |
stream=True
|
| 58 |
)
|
| 59 |
|
| 60 |
-
# Handle the response from the model and append it to the session state
|
| 61 |
for chunk in response:
|
| 62 |
delta = chunk['choices'][0]['delta']
|
| 63 |
if 'role' in delta:
|
| 64 |
st.session_state.messages.append({'role': delta['role'], 'content': ''})
|
| 65 |
elif 'content' in delta:
|
| 66 |
token = delta['content']
|
| 67 |
-
|
| 68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
# Reset user input box
|
| 71 |
st.session_state.user_input = ""
|
|
|
|
# Prepare to get the response from Physics Master
st.write('Physics Master is thinking...')

# Initialize an empty string to accumulate the streamed response
full_response = ""

# Request a streaming chat completion from the model
response = llm.create_chat_completion(
    messages=st.session_state.messages,
    stream=True
)

# Track whether the stream opened an assistant entry in the history.
# Some streams emit content deltas before (or without) a 'role' delta;
# blindly writing to messages[-1] in that case would clobber the
# user's last message instead of storing the assistant's reply.
assistant_started = False

for chunk in response:
    delta = chunk['choices'][0]['delta']
    if 'role' in delta:
        # The stream announced the assistant role: open a new,
        # initially-empty entry in the chat history for the reply.
        st.session_state.messages.append({'role': delta['role'], 'content': ''})
        assistant_started = True
    elif 'content' in delta:
        # Accumulate tokens into the full response
        full_response += delta['content']

# Ensure an assistant entry exists before storing the reply
# (guards against streams that never sent a 'role' delta).
if not assistant_started:
    st.session_state.messages.append({'role': 'assistant', 'content': ''})

# Once the full response is received, store it in the chat history
st.session_state.messages[-1]['content'] = full_response

# Display the full response as a paragraph
st.write(f"**Physics Master:** {full_response}")

# Reset user input box
st.session_state.user_input = ""