abanm committed on
Commit
b52f3ea
·
verified ·
1 Parent(s): a956914

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -18
app.py CHANGED
@@ -14,11 +14,11 @@ st.set_page_config(page_title="Chatbot Test", page_icon="🤖", layout="centered
14
  if "messages" not in st.session_state:
15
  st.session_state["messages"] = []
16
 
17
- # Function to Stream Response
18
- def stream_response(prompt_text, api_key):
19
  """
20
- Stream text from the HF Inference Endpoint using the InferenceClient.
21
- Yields each partial chunk of text as it arrives.
22
  """
23
  client = InferenceClient(SPACE_URL, token=api_key)
24
 
@@ -31,15 +31,11 @@ def stream_response(prompt_text, api_key):
31
  "stop_sequences": ["<|endoftext|>"]
32
  }
33
 
34
- stream = client.text_generation(prompt_text, stream=True, details=True, **gen_kwargs)
35
-
36
  try:
37
- for response in stream:
38
- if response.token.special:
39
- continue
40
- yield response.token.text
41
  except Exception as e:
42
- yield f"Error: {e}"
43
 
44
  # Streamlit Chat Interface
45
  st.title("Chatbot Testing Interface")
@@ -60,13 +56,8 @@ if prompt:
60
  # 3) Generate the response
61
  with st.chat_message("assistant", avatar=DUBS_PATH):
62
  with st.spinner("Dubs is thinking... Woof Woof! 🐾"):
63
- full_response = ""
64
- placeholder = st.empty() # Placeholder for streaming response
65
- response = stream_response(chat_history, HF_API_KEY)
66
- for item in response:
67
- full_response += item
68
- placeholder.markdown(full_response)
69
- placeholder.markdown(full_response)
70
 
71
  # 4) Add assistant response to the session state
72
  st.session_state["messages"].append({"role": "assistant", "content": full_response})
 
14
  if "messages" not in st.session_state:
15
  st.session_state["messages"] = []
16
 
17
+ # Function to Fetch Response (Non-Streaming)
18
+ def fetch_response(prompt_text, api_key):
19
  """
20
+ Fetch full text response from the HF Inference Endpoint using the InferenceClient.
21
+ Returns the generated text as a whole.
22
  """
23
  client = InferenceClient(SPACE_URL, token=api_key)
24
 
 
31
  "stop_sequences": ["<|endoftext|>"]
32
  }
33
 
 
 
34
  try:
35
+ response = client.text_generation(prompt_text, stream=False, details=True, **gen_kwargs)
36
+ return response["generated_text"]
 
 
37
  except Exception as e:
38
+ return f"Error: {e}"
39
 
40
  # Streamlit Chat Interface
41
  st.title("Chatbot Testing Interface")
 
56
  # 3) Generate the response
57
  with st.chat_message("assistant", avatar=DUBS_PATH):
58
  with st.spinner("Dubs is thinking... Woof Woof! 🐾"):
59
+ full_response = fetch_response(chat_history, HF_API_KEY)
60
+ st.write(full_response)
 
 
 
 
 
61
 
62
  # 4) Add assistant response to the session state
63
  st.session_state["messages"].append({"role": "assistant", "content": full_response})