Tafazzul-Nadeeem committed on
Commit
e858d01
·
1 Parent(s): 0b38b77

Prescription feature done

Browse files
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -141,19 +141,18 @@ with gr.Blocks() as demo:
141
  response = agent3_llm_agent(clean_messages)
142
  #######################################################################
143
 
144
- history.append({"role": "assistant", "content": response})
145
-
146
-
147
- return history
148
- # # Simulate streaming response
149
- # for character in response:
150
- # history[-1]['content'] += character
151
- # time.sleep(0.005)
152
- # yield history
153
 
154
 
155
  ##########################################################################
156
- chatbot = gr.Chatbot(type="messages", render_markdown=True)
157
  chat_input = gr.MultimodalTextbox(
158
  interactive=True,
159
  file_count="multiple",
 
141
  response = agent3_llm_agent(clean_messages)
142
  #######################################################################
143
 
144
+ # history.append({"role": "assistant", "content": response})
145
+ # return history
146
+
147
+ history.append({"role": "assistant", "content": ""})
148
+ for line in response.split("\n"):
149
+ history[-1]["content"] += line + "\n"
150
+ time.sleep(0.3) # Adjust delay for pacing
151
+ yield history
 
152
 
153
 
154
  ##########################################################################
155
+ chatbot = gr.Chatbot(type="messages", render_markdown=True, height=700)
156
  chat_input = gr.MultimodalTextbox(
157
  interactive=True,
158
  file_count="multiple",