Spaces:
Running
Running
Updated limits
Browse files
Added limits to prompt input and token output to prevent overload.
app.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
""" Simple Chatbot
|
| 2 |
@author: Nigel Gebodh
|
| 3 |
@email: nigel.gebodh@gmail.com
|
|
|
|
| 4 |
"""
|
| 5 |
import numpy as np
|
| 6 |
import streamlit as st
|
|
@@ -13,10 +14,13 @@ load_dotenv()
|
|
| 13 |
# #===========================================
|
| 14 |
# updates = '''
|
| 15 |
# Updates
|
| 16 |
-
# +01/
|
|
|
|
|
|
|
|
|
|
| 17 |
# - Updated logging info
|
| 18 |
|
| 19 |
-
# +10/10/2025
|
| 20 |
# - Update the model options since Gemma-2-9B-it
|
| 21 |
# is no longer supported. Replaced with GPT-OSS-120B
|
| 22 |
|
|
@@ -54,7 +58,7 @@ from datetime import datetime
|
|
| 54 |
try:
|
| 55 |
LOGGER_TOOL_WEBHOOK = os.environ.get("LOGGER_TOOL_URL")
|
| 56 |
except Exception as e:
|
| 57 |
-
print(f"❌ Error in loading LOGGER_TOOL_WEBHOOK
|
| 58 |
|
| 59 |
|
| 60 |
|
|
@@ -93,7 +97,7 @@ def log_to_webhook(
|
|
| 93 |
try:
|
| 94 |
requests.post(LOGGER_TOOL_WEBHOOK, json=payload, timeout=3)
|
| 95 |
except Exception as e:
|
| 96 |
-
print("Logging failed
|
| 97 |
|
| 98 |
# --------------------------------------------
|
| 99 |
|
|
@@ -112,6 +116,7 @@ def get_session_info():
|
|
| 112 |
data = {
|
| 113 |
"timezone": time.tzname,
|
| 114 |
"platform": sys.platform,
|
|
|
|
| 115 |
}
|
| 116 |
raw = json.dumps(data, sort_keys=True)
|
| 117 |
return hashlib.sha256(raw.encode()).hexdigest()[:12]
|
|
@@ -137,21 +142,14 @@ def reset_conversation():
|
|
| 137 |
st.session_state.messages = []
|
| 138 |
st.session_state.session_info["conversation_id"] = str(uuid.uuid4())
|
| 139 |
|
|
|
|
| 140 |
|
| 141 |
|
| 142 |
|
| 143 |
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
|
| 156 |
API_CALL_LIMIT = 20 # Define the limit
|
| 157 |
|
|
@@ -161,6 +159,15 @@ if 'api_call_count' not in st.session_state:
|
|
| 161 |
|
| 162 |
|
| 163 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 164 |
model_links_hf ={
|
| 165 |
"Gemma-3-27B-it":{
|
| 166 |
"inf_point":"https://router.huggingface.co/nebius/v1",
|
|
@@ -404,8 +411,19 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question "):
|
|
| 404 |
# Add user message to chat history
|
| 405 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 406 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 407 |
|
| 408 |
|
|
|
|
| 409 |
if st.session_state.api_call_count >= API_CALL_LIMIT:
|
| 410 |
|
| 411 |
# Add the warning to the displayed messages, but not to the history sent to the model
|
|
@@ -431,7 +449,7 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question "):
|
|
| 431 |
],
|
| 432 |
temperature=temp_values,#0.5,
|
| 433 |
stream=True,
|
| 434 |
-
max_tokens=3000,
|
| 435 |
)
|
| 436 |
|
| 437 |
response = st.write_stream(stream)
|
|
|
|
| 1 |
""" Simple Chatbot
|
| 2 |
@author: Nigel Gebodh
|
| 3 |
@email: nigel.gebodh@gmail.com
|
| 4 |
+
@website: https://ngebodh.github.io/
|
| 5 |
"""
|
| 6 |
import numpy as np
|
| 7 |
import streamlit as st
|
|
|
|
| 14 |
# #===========================================
|
| 15 |
# updates = '''
|
| 16 |
# Updates
|
| 17 |
+
# + 01/10/2026
|
| 18 |
+
# - Updated cooldown
|
| 19 |
+
|
| 20 |
+
# + 01/08/2026
|
| 21 |
# - Updated logging info
|
| 22 |
|
| 23 |
+
# + 10/10/2025
|
| 24 |
# - Update the model options since Gemma-2-9B-it
|
| 25 |
# is no longer supported. Replaced with GPT-OSS-120B
|
| 26 |
|
|
|
|
| 58 |
try:
|
| 59 |
LOGGER_TOOL_WEBHOOK = os.environ.get("LOGGER_TOOL_URL")
|
| 60 |
except Exception as e:
|
| 61 |
+
print(f"❌ Error in loading LOGGER_TOOL_WEBHOOK")
|
| 62 |
|
| 63 |
|
| 64 |
|
|
|
|
| 97 |
try:
|
| 98 |
requests.post(LOGGER_TOOL_WEBHOOK, json=payload, timeout=3)
|
| 99 |
except Exception as e:
|
| 100 |
+
print("Logging failed")
|
| 101 |
|
| 102 |
# --------------------------------------------
|
| 103 |
|
|
|
|
| 116 |
data = {
|
| 117 |
"timezone": time.tzname,
|
| 118 |
"platform": sys.platform,
|
| 119 |
+
"rand": uuid.uuid4().hex,
|
| 120 |
}
|
| 121 |
raw = json.dumps(data, sort_keys=True)
|
| 122 |
return hashlib.sha256(raw.encode()).hexdigest()[:12]
|
|
|
|
| 142 |
st.session_state.messages = []
|
| 143 |
st.session_state.session_info["conversation_id"] = str(uuid.uuid4())
|
| 144 |
|
| 145 |
+
# --------------------------------------------
|
| 146 |
|
| 147 |
|
| 148 |
|
| 149 |
|
| 150 |
+
#==========================================================
|
| 151 |
+
# Limits
|
| 152 |
+
# --------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
| 154 |
API_CALL_LIMIT = 20 # Define the limit
|
| 155 |
|
|
|
|
| 159 |
|
| 160 |
|
| 161 |
|
| 162 |
+
REQUEST_COOLDOWN = 2 # seconds between requests
|
| 163 |
+
|
| 164 |
+
if "last_request_time" not in st.session_state:
|
| 165 |
+
st.session_state.last_request_time = 0
|
| 166 |
+
# --------------------------------------------
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
|
| 171 |
model_links_hf ={
|
| 172 |
"Gemma-3-27B-it":{
|
| 173 |
"inf_point":"https://router.huggingface.co/nebius/v1",
|
|
|
|
| 411 |
# Add user message to chat history
|
| 412 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 413 |
|
| 414 |
+
#Cooldown check
|
| 415 |
+
now = time.time()
|
| 416 |
+
elapsed = now - st.session_state.last_request_time
|
| 417 |
+
|
| 418 |
+
if elapsed < REQUEST_COOLDOWN:
|
| 419 |
+
wait_time = round(REQUEST_COOLDOWN - elapsed, 1)
|
| 420 |
+
st.warning(f"⏳ Please wait before sending another request.")
|
| 421 |
+
st.stop()
|
| 422 |
+
|
| 423 |
+
st.session_state.last_request_time = now
|
| 424 |
|
| 425 |
|
| 426 |
+
|
| 427 |
if st.session_state.api_call_count >= API_CALL_LIMIT:
|
| 428 |
|
| 429 |
# Add the warning to the displayed messages, but not to the history sent to the model
|
|
|
|
| 449 |
],
|
| 450 |
temperature=temp_values,#0.5,
|
| 451 |
stream=True,
|
| 452 |
+
max_tokens=1500, #3000,
|
| 453 |
)
|
| 454 |
|
| 455 |
response = st.write_stream(stream)
|