abanm committed on
Commit
ed4ee94
·
verified ·
1 Parent(s): 27402c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -24
app.py CHANGED
@@ -128,33 +128,12 @@ client = InferenceClient(SPACE_URL, token=HF_API_KEY)
128
 
129
 
130
 
131
-
132
-
133
- def fetch_response(prompt):
134
- """
135
- Fetch full text response from the HF Inference Endpoint using the InferenceClient.
136
- Returns tokens in a streaming fashion.
137
- """
138
-
139
-
140
-
141
-
142
-
143
-
144
-
145
-
146
-
147
-
148
-
149
-
150
-
151
-
152
-
153
 
154
  # -------------------------
155
  # Streaming Logic using Generator
156
  # -------------------------
157
- def stream_response(prompt_text, api_key):
158
  """
159
  Stream text from the HF Inference Endpoint using the InferenceClient.
160
  Yields each partial chunk of text as it arrives.
@@ -199,7 +178,7 @@ if prompt:
199
  with st.spinner("Dubs is thinking... Woof Woof! 🐾"):
200
  msg=""
201
  with st.chat_message("assistant", avatar=DUBS_PATH):
202
- full_response = fetch_response(chat_history)
203
  msg=st.write_stream(full_response)
204
 
205
 
 
128
 
129
 
130
 
131
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
  # -------------------------
134
  # Streaming Logic using Generator
135
  # -------------------------
136
+ def stream_response(prompt_text):
137
  """
138
  Stream text from the HF Inference Endpoint using the InferenceClient.
139
  Yields each partial chunk of text as it arrives.
 
178
  with st.spinner("Dubs is thinking... Woof Woof! 🐾"):
179
  msg=""
180
  with st.chat_message("assistant", avatar=DUBS_PATH):
181
+ full_response = stream_response(chat_history)
182
  msg=st.write_stream(full_response)
183
 
184