abanm committed on
Commit
0411f90
·
verified ·
1 Parent(s): 7d71d00

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -22
app.py CHANGED
@@ -23,7 +23,7 @@ except OSError as e:
23
  st.set_page_config(page_title="DUBSChat", page_icon=IMAGE_PATH, layout="wide")
24
 
25
  # If you are using a custom "logo" method:
26
- st.logo(IMAGE_PATH_2)
27
 
28
  # -------------------------
29
  # Utility Functions
@@ -114,28 +114,39 @@ for message in st.session_state["messages"]:
114
  # -------------------------
115
  # Streaming Logic
116
  # -------------------------
117
- def stream_response(prompt_text, api_key):
118
  """
119
  Stream text from the HF Inference Endpoint (or any streaming API).
120
  Yields each chunk of text as it arrives.
121
  """
122
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  # POST request with stream=True to get partial chunks
124
  response = requests.post(
125
  SPACE_URL,
126
- json={"prompt": prompt_text}, # Adjust this to match your endpoint’s payload
127
  headers={"Authorization": f"Bearer {api_key}"},
128
  stream=True
129
  )
130
  response.raise_for_status()
131
 
132
- # The endpoint presumably returns lines of JSON. Adjust parsing if your endpoint differs:
133
  for line in response.iter_lines():
134
  if line:
135
  data = json.loads(line.decode("utf-8"))
136
- # Example format: data might be [{"generated_text": "..."}]
137
- # Adjust if your endpoint returns different JSON keys
138
- chunk = data[0].get("generated_text", "")
139
  yield chunk
140
 
141
  except requests.exceptions.Timeout:
@@ -157,23 +168,9 @@ if prompt := st.chat_input():
157
  st.chat_message("user").write(prompt)
158
 
159
  # 2) Build combined chat history for the model prompt
160
- # or whatever format your endpoint needs. Example:
161
  chat_history = "".join(
162
  [f"<|{msg['role']}|>{msg['content']}<|end|>" for msg in st.session_state["messages"]]
163
  )
164
 
165
  # 3) Create a placeholder for the assistant’s streamed response
166
- with st.spinner("Dubs is thinking... Woof Woof! 🐾"):
167
- assistant_message_placeholder = st.chat_message("assistant", avatar=Dubs_PATH).empty()
168
-
169
- full_response = ""
170
- # 4) Stream chunks from the API
171
- for chunk in stream_response(chat_history, HF_API_KEY):
172
- full_response += chunk
173
- # Continuously update the placeholder with the partial response
174
- assistant_message_placeholder.write(full_response)
175
-
176
- # 5) Save the final assistant message in session state
177
- st.session_state["messages"].append({"role": "assistant", "content": full_response})
178
- # 6) Persist updated chat history
179
- save_chat_history(st.session_state["session_name"], st.session_state["messages"])
 
23
  st.set_page_config(page_title="DUBSChat", page_icon=IMAGE_PATH, layout="wide")
24
 
25
  # If you are using a custom "logo" method:
26
+ st.image(IMAGE_PATH_2, width=200)
27
 
28
  # -------------------------
29
  # Utility Functions
 
114
  # -------------------------
115
  # Streaming Logic
116
  # -------------------------
117
+ def stream_response(chat_history, api_key):
118
  """
119
  Stream text from the HF Inference Endpoint (or any streaming API).
120
  Yields each chunk of text as it arrives.
121
  """
122
  try:
123
+ # Prepare payload for the POST request
124
+ payload = {
125
+ "inputs": chat_history,
126
+ "parameters": {
127
+ "temperature": 0.7,
128
+ "top_p": 0.9,
129
+ "max_new_tokens": 200,
130
+ },
131
+ "options": {
132
+ "stream": True
133
+ },
134
+ }
135
+
136
  # POST request with stream=True to get partial chunks
137
  response = requests.post(
138
  SPACE_URL,
139
+ json=payload,
140
  headers={"Authorization": f"Bearer {api_key}"},
141
  stream=True
142
  )
143
  response.raise_for_status()
144
 
145
+ # Process and yield streamed chunks of text
146
  for line in response.iter_lines():
147
  if line:
148
  data = json.loads(line.decode("utf-8"))
149
+ chunk = data.get("generated_text", "")
 
 
150
  yield chunk
151
 
152
  except requests.exceptions.Timeout:
 
168
  st.chat_message("user").write(prompt)
169
 
170
  # 2) Build combined chat history for the model prompt
 
171
  chat_history = "".join(
172
  [f"<|{msg['role']}|>{msg['content']}<|end|>" for msg in st.session_state["messages"]]
173
  )
174
 
175
  # 3) Create a placeholder for the assistant’s streamed response
176
+ with st.spinner("Dubs is thinking... Woof Woof! 🐾"):