add-model-selector #1 by bradnow (opened)

Files changed:
- .DS_Store +0 -0
- .gitignore +1 -5
- README.md +4 -4
- __pycache__/utils.cpython-310.pyc +0 -0
- app.py +142 -638
- gradio_runner.py +0 -10
- log_chat.py +0 -250
- requirements.txt +3 -5
- styles.css +0 -179
- theme.py +0 -148
- timer.py +0 -114
- utils.py +0 -159
.DS_Store
DELETED
Binary file (6.15 kB)
.gitignore
CHANGED

@@ -1,5 +1 @@
 .idea/*
-__pycache__/
-/.run*/
-/train.csv
-/Makefile
README.md
CHANGED

@@ -1,8 +1,8 @@
 ---
-title: Apriel Chat
+title: Apriel Nemotron Chat
 emoji: 💬
-colorFrom:
-colorTo:
+colorFrom: yellow
+colorTo: purple
 sdk: gradio
 sdk_version: 5.29.0
 app_file: app.py
@@ -11,4 +11,4 @@ license: mit
 short_description: ServiceNow-AI model chat
 ---

-
+An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
__pycache__/utils.cpython-310.pyc
DELETED
Binary file (3.88 kB)
app.py
CHANGED

@@ -1,662 +1,166 @@
-import
-
-

 from openai import OpenAI
 import gradio as gr
-import
-import
-
-
-
-# Workaround for PyCharm debugger + uvicorn compatibility error:
-# TypeError: _patch_asyncio.<locals>.run() got an unexpected keyword argument 'loop_factory'
-DEBUG = False
-if DEBUG is True:  # or sys.gettrace() is not None:  # Debugger is attached
-    import asyncio
-    _original_run = asyncio.run
-    def _patched_run(main, **kwargs):
-        kwargs.pop('loop_factory', None)  # Remove unsupported arg
-        return _original_run(main, **kwargs)
-    asyncio.run = _patched_run
-
-from theme import apriel
-from utils import COMMUNITY_POSTFIX_URL, get_model_config, check_format, models_config, \
-    logged_event_handler, DEBUG_MODE, DEBUG_MODEL, log_debug, log_info, log_error, log_warning
-from log_chat import log_chat
-
-DEFAULT_MODEL_TEMPERATURE = 0.6
-BUTTON_WIDTH = 160
-DEFAULT_OPT_OUT_VALUE = DEBUG_MODE
-
-# If DEBUG_MODEL is True, use an alternative model (without reasoning) for testing
-# DEFAULT_MODEL_NAME = "Apriel-1.5-15B-thinker" if not DEBUG_MODEL else "Apriel-1.5-15B-thinker"
-DEFAULT_MODEL_NAME = "Apriel-1.6-15B-Thinker"

-
-INFO_BANNER_MARKDOWN = """
-<span class="banner-message-text">ℹ️ This app has been updated to use the recommended temperature of 0.6. We had set it to 0.8 earlier and expect 0.6 to be better. Please provide feedback using the model link.</span>
-"""
-NEW_MODEL_BANNER_MARKDOWN = """
-<span class="banner-message-text"><span class="banner-message-emoji">🚀</span> Now running [Apriel-1.6-15B-Thinker](https://huggingface.co/ServiceNow-AI/Apriel-1.6-15b-Thinker) - 30% more efficient, frontier-class reasoning</span>
-"""
-BANNER_MARKDOWN = NEW_MODEL_BANNER_MARKDOWN

-
-BUTTON_DISABLED = gr.update(interactive=False)
-INPUT_ENABLED = gr.update(interactive=True)
-INPUT_DISABLED = gr.update(interactive=False)
-DROPDOWN_ENABLED = gr.update(interactive=True)
-DROPDOWN_DISABLED = gr.update(interactive=False)

-
-
-STOP_BUTTON_ENABLED = gr.update(interactive=True, visible=True)
-STOP_BUTTON_DISABLED = gr.update(interactive=True, visible=False)

 chat_start_count = 0
-model_config = {}
-openai_client = None
-
-USE_RANDOM_ENDPOINT = False
-endpoint_rotation_count = 0
-
-# Maximum number of image messages allowed per request
-MAX_IMAGE_MESSAGES = 5
-
-
-def app_loaded(state, request: gr.Request):
-    message_html = setup_model(DEFAULT_MODEL_NAME, intial=False)
-    state['session'] = request.session_hash if request else uuid4().hex
-    log_debug(f"app_loaded() --> Session: {state['session']}")
-    return state, message_html
-
-
-def update_model_and_clear_chat(model_name):
-    actual_model_name = model_name.replace("Model: ", "")
-    desc = setup_model(actual_model_name)
-    return desc, []
-
-
-def setup_model(model_key, intial=False):
-    global model_config, openai_client, endpoint_rotation_count
-    model_config = get_model_config(model_key)
-    log_debug(f"update_model() --> Model config: {model_config}")
-
-    url_list = (model_config.get('VLLM_API_URL_LIST') or "").split(",")
-    if USE_RANDOM_ENDPOINT:
-        base_url = random.choice(url_list) if len(url_list) > 0 else model_config.get('VLLM_API_URL')
-    else:
-        base_url = url_list[endpoint_rotation_count % len(url_list)]
-        endpoint_rotation_count += 1
-
-    openai_client = OpenAI(
-        api_key=model_config.get('AUTH_TOKEN'),
-        base_url=base_url
-    )
-    model_config['base_url'] = base_url
-    log_debug(f"Switched to model {model_key} using endpoint {base_url}")
-
-    _model_hf_name = model_config.get("MODEL_HF_URL").split('https://huggingface.co/')[1]
-    _link = f"<a href='{model_config.get('MODEL_HF_URL')}{COMMUNITY_POSTFIX_URL}' target='_blank'>{_model_hf_name}</a>"
-    _description = f"We'd love to hear your thoughts on the model. Click here to provide feedback - {_link}"
-
-    if intial:
-        return
-    else:
-        return _description
-
-
-def chat_started():
-    # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn
-    return (DROPDOWN_DISABLED, gr.update(value="", interactive=False),
-            SEND_BUTTON_DISABLED, STOP_BUTTON_ENABLED, BUTTON_DISABLED)
-
-
-def chat_finished():
-    # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn
-    return DROPDOWN_ENABLED, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED
-
-
-def stop_chat(state):
-    state["stop_flag"] = True
-    gr.Info("Chat stopped")
-    return state


-def toggle_opt_out(state, checkbox):
-    state["opt_out"] = checkbox
-    return state


-def run_chat_inference(history, message, state):
     global chat_start_count
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    log_debug(f"chat_fn() --> Message: {message}")
-    log_debug(f"chat_fn() --> History: {history}")
-
-    # We have multimodal input in this case
-    if isinstance(message, Mapping):
-        files = message.get("files") or []
-        message = message.get("text") or ""
-        log_debug(f"chat_fn() --> Message (text only): {message}")
-        log_debug(f"chat_fn() --> Files: {files}")
-
-        # Validate that any uploaded files are images
-        if len(files) > 0:
-            invalid_files = []
-            for path in files:
-                try:
-                    mime, _ = mimetypes.guess_type(path)
-                    mime = mime or ""
-                    if not mime.startswith("image/"):
-                        invalid_files.append((os.path.basename(path), mime or "unknown"))
-                except Exception as e:
-                    log_error(f"Failed to inspect file '{path}': {e}")
-                    invalid_files.append((os.path.basename(path), "unknown"))
-
-            if invalid_files:
-                msg = "Only image files are allowed. Invalid uploads: " + \
-                      ", ".join([f"{p} (type: {m})" for p, m in invalid_files])
-                log_warning(msg)
-                gr.Warning(msg)
-                yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-                return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-        # Enforce maximum number of files/images per request
-        if len(files) > MAX_IMAGE_MESSAGES:
-            gr.Warning(f"Too many images provided; keeping only the first {MAX_IMAGE_MESSAGES} file(s).")
-            files = files[:MAX_IMAGE_MESSAGES]
-
-    try:
-        # Check if the message is empty
-        if not message.strip() and len(files) == 0:
-            gr.Info("Please enter a message before sending")
-            yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-            return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-        chat_start_count = chat_start_count + 1
-        user_messages_count = sum(1 for item in history if isinstance(item, dict) and item.get("role") == "user"
-                                  and isinstance(item.get("content"), str))
-        log_info(f"chat_start_count: {chat_start_count}, turns: {user_messages_count + 1}, model: {model_name}")
-
-        is_reasoning = model_config.get("REASONING")
-
-        # Remove any assistant messages with metadata from history for multiple turns
-        log_debug(f"Initial History: {history}")
-        check_format(history, "messages")
-        # Build UI history: add text (if any) and per-file image placeholders {"path": ...}
-        # Build API parts separately later to avoid Gradio issues with arrays in content
-        if len(files) == 0:
-            history.append({"role": "user", "content": message})
-        else:
-            if message.strip():
-                history.append({"role": "user", "content": message})
-            for path in files:
-                history.append({"role": "user", "content": {"path": path}})
-
-        log_debug(f"History with user message: {history}")
-        check_format(history, "messages")
-
-        # Create the streaming response
-        try:
-            history_no_thoughts = [item for item in history if
-                                   not (isinstance(item, dict) and
-                                        item.get("role") == "assistant" and
-                                        isinstance(item.get("metadata"), dict) and
-                                        item.get("metadata", {}).get("title") is not None)]
-            log_debug(f"Updated History: {history_no_thoughts}")
-            check_format(history_no_thoughts, "messages")
-            log_debug(f"history_no_thoughts with user message: {history_no_thoughts}")
-
-            # Build API-specific messages:
-            # - Convert any UI image placeholders {"path": ...} to image_url parts
-            # - Convert any user string content that is a valid file path to image_url parts
-            # - Coalesce consecutive image paths into a single image-only user message
-            api_messages = []
-            image_parts_buffer = []
-
-            def flush_image_buffer():
-                if len(image_parts_buffer) > 0:
-                    api_messages.append({"role": "user", "content": list(image_parts_buffer)})
-                    image_parts_buffer.clear()
-
-            def to_image_part(path: str):
-                try:
-                    mime, _ = mimetypes.guess_type(path)
-                    mime = mime or "application/octet-stream"
-                    with open(path, "rb") as f:
-                        b64 = base64.b64encode(f.read()).decode("utf-8")
-                    data_url = f"data:{mime};base64,{b64}"
-                    return {"type": "image_url", "image_url": {"url": data_url}}
-                except Exception as e:
-                    log_error(f"Failed to load file '{path}': {e}")
-                    return None
-
-            def normalize_msg(msg):
-                # Returns (role, content, as_dict) where as_dict is a message dict suitable to pass through when unmodified
-                if isinstance(msg, dict):
-                    return msg.get("role"), msg.get("content"), msg
-                # Gradio ChatMessage-like object
-                role = getattr(msg, "role", None)
-                content = getattr(msg, "content", None)
-                if role is not None:
-                    return role, content, {"role": role, "content": content}
-                return None, None, msg
-
-            for m in copy.deepcopy(history_no_thoughts):
-                role, content, as_dict = normalize_msg(m)
-                # Unknown structure: pass through
-                if role is None:
-                    flush_image_buffer()
-                    api_messages.append(as_dict)
-                    continue
-
-                # Assistant messages pass through as-is
-                if role == "assistant":
-                    flush_image_buffer()
-                    api_messages.append(as_dict)
-                    continue
-
-                # Only user messages have potential image paths to convert
-                if role == "user":
-                    # Case A: {'path': ...}
-                    if isinstance(content, dict) and isinstance(content.get("path"), str):
-                        p = content["path"]
-                        part = to_image_part(p) if os.path.isfile(p) else None
-                        if part:
-                            image_parts_buffer.append(part)
-                        else:
-                            flush_image_buffer()
-                            api_messages.append({"role": "user", "content": str(content)})
-                        continue
-
-                    # Case B: string or tuple content that may be a file path
-                    if isinstance(content, str):
-                        if os.path.isfile(content):
-                            part = to_image_part(content)
-                            if part:
-                                image_parts_buffer.append(part)
-                                continue
-                        # Not a file path: pass through as text
-                        flush_image_buffer()
-                        api_messages.append({"role": "user", "content": content})
-                        continue
-                    if isinstance(content, tuple):
-                        # Common case: a single-element tuple containing a path string
-                        tuple_items = list(content)
-                        tmp_parts = []
-                        text_accum = []
-                        for item in tuple_items:
-                            if isinstance(item, str) and os.path.isfile(item):
-                                part = to_image_part(item)
-                                if part:
-                                    tmp_parts.append(part)
-                                else:
-                                    text_accum.append(item)
-                            else:
-                                text_accum.append(str(item))
-                        if tmp_parts:
-                            flush_image_buffer()
-                            api_messages.append({"role": "user", "content": tmp_parts})
-                            if not text_accum:
-                                continue
-                        if text_accum:
-                            flush_image_buffer()
-                            api_messages.append({"role": "user", "content": "\n".join(text_accum)})
-                        continue
-
-                    # Case C: list content
-                    if isinstance(content, list):
-                        # If it's already a list of parts, let it pass through
-                        all_dicts = all(isinstance(c, dict) for c in content)
-                        if all_dicts:
-                            flush_image_buffer()
-                            api_messages.append({"role": "user", "content": content})
-                            continue
-                        # It might be a list of strings (paths/text). Convert string paths to image parts, others to text parts
-                        tmp_parts = []
-                        text_accum = []
-
-                        def flush_text_accum():
-                            if text_accum:
-                                api_messages.append({"role": "user", "content": "\n".join(text_accum)})
-                                text_accum.clear()
-                        for item in content:
-                            if isinstance(item, str) and os.path.isfile(item):
-                                part = to_image_part(item)
-                                if part:
-                                    tmp_parts.append(part)
-                                else:
-                                    text_accum.append(item)
-                            else:
-                                text_accum.append(str(item))
-                        if tmp_parts:
-                            flush_image_buffer()
-                            api_messages.append({"role": "user", "content": tmp_parts})
-                        if text_accum:
-                            flush_text_accum()
-                        continue
-
-                    # Fallback: pass through
-                    flush_image_buffer()
-                    api_messages.append(as_dict)
-                    continue
-
-                # Other roles
-                flush_image_buffer()
-                api_messages.append(as_dict)
-
-            # Flush any trailing images
-            flush_image_buffer()
-
-            log_debug(f"sending api_messages to model {model_name}: {api_messages}")
-
-            # Ensure we don't send too many images (count only messages whose content is a list of parts)
-            image_msg_indices = [
-                i for i, msg in enumerate(api_messages)
-                if isinstance(msg, dict) and isinstance(msg.get('content'), list)
-            ]
-            image_count = len(image_msg_indices)
-            if image_count > MAX_IMAGE_MESSAGES:
-                # Remove oldest image messages until we have MAX_IMAGE_MESSAGES or fewer
-                to_remove = image_count - MAX_IMAGE_MESSAGES
-                removed = 0
-                for idx in image_msg_indices:
-                    if removed >= to_remove:
-                        break
-                    # Pop considering prior removals shift indices
-                    api_messages.pop(idx - removed)
-                    removed += 1
-                gr.Warning(f"Too many images provided; keeping the latest {MAX_IMAGE_MESSAGES} and dropped {removed} older image message(s).")
-
-            stream = openai_client.chat.completions.create(
-                model=model_name,
-                messages=api_messages,
-                temperature=temperature,
-                stream=True
-            )
-        except Exception as e:
-            log_error(f"Error:\n\t{e}\n\tInference failed for model {model_name} and endpoint {model_config['base_url']}")
-            error = str(e)
-            yield ([{"role": "assistant",
-                     "content": "😔 The model is unavailable at the moment. Please try again later."}],
-                   INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state)
-            if state["opt_out"] is not True:
-                log_chat(chat_id=state["chat_id"],
-                         session_id=state["session"],
-                         model_name=model_name,
-                         prompt=message,
-                         history=history,
-                         info={"is_reasoning": model_config.get("REASONING"), "temperature": temperature,
-                               "stopped": True, "error": str(e)},
-                         )
-            else:
-                log_info(f"User opted out of chat history. Not logging chat. model: {model_name}")
-            return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state

-
-
                 role="assistant",
-                content=
-
-
-
-            check_format(history, "messages")
-        else:
             history.append(gr.ChatMessage(
                 role="assistant",
-                content=
             ))
-            log_debug(f"History added empty assistant: {history}")
-            check_format(history, "messages")
-
-        output = ""
-        completion_started = False
-        for chunk in stream:
-            if state["stop_flag"]:
-                log_debug(f"chat_fn() --> Stopping streaming...")
-                break  # Exit the loop if the stop flag is set
-            # Extract the new content from the delta field
-            content = getattr(chunk.choices[0].delta, "content", "") or ""
-            reasoning_content = getattr(chunk.choices[0].delta, "reasoning_content", "") or ""
-            output += reasoning_content + content
-
-            if is_reasoning:
-                parts = output.split(output_tag_start)
-
-                if len(parts) > 1:
-                    if parts[1].endswith(output_tag_end):
-                        parts[1] = parts[1].replace(output_tag_end, "")
-                    if parts[1].endswith(f"{output_tag_end}\n{output_stop_token}"):
-                        parts[1] = parts[1].replace(f"{output_tag_end}\n{output_stop_token}", "")
-                    if parts[1].endswith(f"{output_tag_end}\n{output_stop_token}\n"):
-                        parts[1] = parts[1].replace(f"{output_tag_end}\n{output_stop_token}\n", "")
-                    if parts[1].endswith(f"{output_stop_token}"):
-                        parts[1] = parts[1].replace(f"{output_stop_token}", "")
-                    if parts[1].endswith(f"{output_stop_token}\n"):
-                        parts[1] = parts[1].replace(f"{output_stop_token}\n", "")

-
-
-
-
-                )
-                if completion_started:
-                    history[-1] = gr.ChatMessage(
-                        role="assistant",
-                        content=parts[1]
-                    )
-                elif len(parts) > 1 and not completion_started:
-                    completion_started = True
-                    history.append(gr.ChatMessage(
-                        role="assistant",
-                        content=parts[1]
-                    ))
-            else:
-                if output.endswith("<|end|>"):
-                    output = output.replace("<|end|>", "")
-                if output.endswith("<|end|>\n"):
-                    output = output.replace("<|end|>\n", "")
-                history[-1] = gr.ChatMessage(
-                    role="assistant",
-                    content=output
-                )

-
-

-        log_debug(f"Final History: {history}")
-        check_format(history, "messages")
-        yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-    finally:
-        if error is None:
-            log_debug(f"chat_fn() --> Finished streaming. {chat_start_count} chats started.")
-            if state["opt_out"] is not True:
-                log_chat(chat_id=state["chat_id"],
-                         session_id=state["session"],
-                         model_name=model_name,
-                         prompt=message,
-                         history=history,
-                         info={"is_reasoning": model_config.get("REASONING"), "temperature": temperature,
-                               "stopped": state["stop_flag"]},
-                         )

-
-
-        state["is_streaming"] = False
-        state["stop_flag"] = False
-        return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state


-
-
-title
-description
-theme
-
-
-    custom_css = f.read()
-
-with gr.Blocks(theme=theme, css=custom_css) as demo:
-    session_state = gr.State(value={
-        "is_streaming": False,
-        "stop_flag": False,
-        "chat_id": None,
-        "session": None,
-        "opt_out": DEFAULT_OPT_OUT_VALUE,
-    })  # Store session state as a dictionary
-
-    gr.HTML(f"""
-    <style>
-    @media (min-width: 1024px) {{
-        .send-button-container, .clear-button-container {{
-            max-width: {BUTTON_WIDTH}px;
-        }}
-    }}
-    </style>
-    """, elem_classes="css-styles")
-    if SHOW_BANNER:
-        with gr.Row(variant="compact", elem_classes=["responsive-row", "no-padding"], ):
-            with gr.Column():
-                gr.Markdown(BANNER_MARKDOWN, elem_classes="banner-message")
-
-    with gr.Row(variant="panel", elem_classes="responsive-row"):
-        with gr.Column(scale=1, min_width=400, elem_classes="model-dropdown-container"):
-            model_dropdown = gr.Dropdown(
-                choices=[f"Model: {model}" for model in models_config.keys()],
-                value=f"Model: {DEFAULT_MODEL_NAME}",
-                label=None,
-                interactive=True,
-                container=False,
-                scale=0,
-                min_width=400
-            )
-        with gr.Column(scale=4, min_width=0):
-            feedback_message_html = gr.HTML(description, elem_classes="model-message")
-
-    chatbot = gr.Chatbot(
-        type="messages",
-        height="calc(100svh - 320px)",
-        max_height="calc(100svh - 320px)",
-        elem_classes="chatbot",
-    )
-
-    with gr.Row():
-        with gr.Column(scale=10, min_width=400, elem_classes="user-input-container"):
-            with gr.Row():
-                user_input = gr.MultimodalTextbox(
-                    interactive=True,
-                    container=False,
-                    file_count="multiple",
-                    placeholder="Type your message here and press Enter or upload file...",
-                    show_label=False,
-                    sources=["upload"],
-                    max_plain_text_length=100000,
-                    max_lines=10
-                )
-
-                # Original text-only input
-                # user_input = gr.Textbox(
-                #     show_label=False,
-                #     placeholder="Type your message here and press Enter",
-                #     container=False
-                # )
-        with gr.Column(scale=1, min_width=BUTTON_WIDTH * 2 + 20):
-            with gr.Row():
-                with gr.Column(scale=1, min_width=BUTTON_WIDTH, elem_classes="send-button-container"):
-                    send_btn = gr.Button("Send", variant="primary", elem_classes="control-button")
-                    stop_btn = gr.Button("Stop", variant="cancel", elem_classes="control-button", visible=False)
-                with gr.Column(scale=1, min_width=BUTTON_WIDTH, elem_classes="clear-button-container"):
-                    clear_btn = gr.ClearButton(chatbot, value="New Chat", variant="secondary", elem_classes="control-button")
-    with gr.Row():
-        with gr.Column(min_width=400, elem_classes="opt-out-container"):
-            with gr.Row():
-                gr.HTML(
-                    "We may use your chats to improve our AI. You may opt out if you don’t want your conversations saved.",
-                    elem_classes="opt-out-message")
-            with gr.Row():
-                opt_out_checkbox = gr.Checkbox(
-                    label="Don’t save my chat history for improvements or training",
-                    value=DEFAULT_OPT_OUT_VALUE,
-                    elem_classes="opt-out-checkbox",
-                    interactive=True,
-                    container=False
-                )
-
-    gr.on(
-        triggers=[send_btn.click, user_input.submit],
-        fn=run_chat_inference,  # this generator streams results. do not use logged_event_handler wrapper
-        inputs=[chatbot, user_input, session_state],
-        outputs=[chatbot, user_input, send_btn, stop_btn, clear_btn, session_state],
-        concurrency_limit=4,
-        api_name=False
-    ).then(
-        fn=chat_finished, inputs=None, outputs=[model_dropdown, user_input, send_btn, stop_btn, clear_btn], queue=False)
-
-    # In parallel, disable or update the UI controls
-    gr.on(
-        triggers=[send_btn.click, user_input.submit],
-        fn=chat_started,
-        inputs=None,
-        outputs=[model_dropdown, user_input, send_btn, stop_btn, clear_btn],
-        queue=False,
-        show_progress='hidden',
-        api_name=False
-    )
-
-    stop_btn.click(
-        fn=stop_chat,
-        inputs=[session_state],
-        outputs=[session_state],
-        api_name=False
-    )
-
-    opt_out_checkbox.change(fn=toggle_opt_out, inputs=[session_state, opt_out_checkbox], outputs=[session_state])
-
-    # Ensure the model is reset to default on page reload
-    demo.load(
-        fn=logged_event_handler(
-            log_msg="Browser session started",
-            event_handler=app_loaded
-        ),
-        inputs=[session_state],
-        outputs=[session_state, feedback_message_html],
-        queue=True,
-        api_name=False
-    )
-
-    model_dropdown.change(
-        fn=update_model_and_clear_chat,
-        inputs=[model_dropdown],
-        outputs=[feedback_message_html, chatbot],
-        api_name=False
-    )
-
-demo.queue(default_concurrency_limit=2).launch(ssr_mode=False, show_api=False, max_file_size="10mb")
-log_info("Gradio app launched")
+import os
+import sys
+import datetime

 from openai import OpenAI
 import gradio as gr
+from gradio.components.chatbot import ChatMessage, Message
+from typing import (
+    Any,
+    Literal,
+)

+DEBUG_LOG = False or os.environ.get("DEBUG_LOG") == "True"

+print(f"Gradio version: {gr.__version__}")

+title = None  # "ServiceNow-AI Chat"  # modelConfig.get('MODE_DISPLAY_NAME')
+description = "Please use the community section on this space to provide feedback! <a href=\"https://huggingface.co/ServiceNow-AI/Apriel-Nemotron-15b-Thinker/discussions\">ServiceNow-AI/Apriel-Nemotron-Chat</a>"

 chat_start_count = 0

+model_config = {
+    "MODEL_NAME": os.environ.get("MODEL_NAME"),
+    "MODE_DISPLAY_NAME": os.environ.get("MODE_DISPLAY_NAME"),
+    "MODEL_HF_URL": os.environ.get("MODEL_HF_URL"),
+    "VLLM_API_URL": os.environ.get("VLLM_API_URL"),
+    "AUTH_TOKEN": os.environ.get("AUTH_TOKEN")
+}
+
+# Initialize the OpenAI client with the vLLM API URL and token
+client = OpenAI(
+    api_key=model_config.get('AUTH_TOKEN'),
+    base_url=model_config.get('VLLM_API_URL')
+)
+
+
+def log_message(message):
+    if DEBUG_LOG is True:
+        print(message)
+
+
+# Gradio 5.0.1 had issues with checking the message formats. 5.29.0 does not!
+def _check_format(messages: Any, type: Literal["messages", "tuples"] = "messages") -> None:
+    if type == "messages":
+        all_valid = all(
+            isinstance(message, dict)
+            and "role" in message
+            and "content" in message
+            or isinstance(message, ChatMessage | Message)
+            for message in messages
+        )
+        if not all_valid:
+            # Display which message is not valid
+            for i, message in enumerate(messages):
+                if not (isinstance(message, dict) and
+                        "role" in message and
+                        "content" in message) and not isinstance(message, ChatMessage | Message):
+                    print(f"_check_format() --> Invalid message at index {i}: {message}\n", file=sys.stderr)
+                    break
+
+            raise Exception(
+                "Data incompatible with messages format. Each message should be a dictionary with 'role' and 'content' keys or a ChatMessage object."
+            )
+        # else:
+        #     print("_check_format() --> All messages are valid.")
+    elif not all(
+        isinstance(message, (tuple, list)) and len(message) == 2
+        for message in messages
+    ):
+        raise Exception(
+            "Data incompatible with tuples format. Each message should be a list of length 2."
+        )


+def chat_fn(message, history):
+    log_message(f"{'-' * 80}\nchat_fn() --> Message: {message}")

     global chat_start_count
+    chat_start_count = chat_start_count + 1
+    print(
+        f"{datetime.datetime.now()}: chat_start_count: {chat_start_count}, turns: {int(len(history if history else []) / 3)}")
+
+    # Remove any assistant messages with metadata from history for multiple turns
+    log_message(f"Original History: {history}")
+    _check_format(history, "messages")
+    history = [item for item in history if
+               not (isinstance(item, dict) and
+                    item.get("role") == "assistant" and
+                    isinstance(item.get("metadata"), dict) and
+                    item.get("metadata", {}).get("title") is not None)]
+    log_message(f"Updated History: {history}")
+    _check_format(history, "messages")
+
+    history.append({"role": "user", "content": message})
+    log_message(f"History with user message: {history}")
+    _check_format(history, "messages")
+
+    # Create the streaming response
+    stream = client.chat.completions.create(
+        model=model_config.get('MODEL_NAME'),
+        messages=history,
+        temperature=0.8,
+        stream=True
+    )

+    history.append(gr.ChatMessage(
+        role="assistant",
+        content="Thinking...",
+        metadata={"title": "🧠 Thought"}
+    ))
+    log_message(f"History added thinking: {history}")
+    _check_format(history, "messages")
+
+    output = ""
+    completion_started = False
+    for chunk in stream:
+        # Extract the new content from the delta field
+        content = getattr(chunk.choices[0].delta, "content", "")
+        output += content
+
+        parts = output.split("[BEGIN FINAL RESPONSE]")
+
+        if len(parts) > 1:
+            if parts[1].endswith("[END FINAL RESPONSE]"):
+                parts[1] = parts[1].replace("[END FINAL RESPONSE]", "")
+            if parts[1].endswith("[END FINAL RESPONSE]\n<|end|>"):
+                parts[1] = parts[1].replace("[END FINAL RESPONSE]\n<|end|>", "")
+
+        history[-1 if not completion_started else -2] = gr.ChatMessage(
+            role="assistant",
+            content=parts[0],
+            metadata={"title": "🧠 Thought"}
+        )
+        if completion_started:
+            history[-1] = gr.ChatMessage(
                 role="assistant",
+                content=parts[1]
+            )
+        elif len(parts) > 1 and not completion_started:
+            completion_started = True
             history.append(gr.ChatMessage(
                 role="assistant",
+                content=parts[1]
             ))

+        # only yield the most recent assistant messages
+        messages_to_yield = history[-1:] if not completion_started else history[-2:]
+        # _check_format(messages_to_yield, "messages")
+        yield messages_to_yield

+    log_message(f"Final History: {history}")
+    _check_format(history, "messages")


+# Add the model display name and Hugging Face URL to the description
+# description = f"### Model: [{MODE_DISPLAY_NAME}]({MODEL_HF_URL})"

+print(f"Running model {model_config.get('MODE_DISPLAY_NAME')} ({model_config.get('MODEL_NAME')})")

+gr.ChatInterface(
+    chat_fn,
+    title=title,
+    description=description,
+    theme=gr.themes.Default(primary_hue="green"),
+    type="messages",
+).launch()
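Note on the streaming logic in the new chat_fn: the model's reasoning and final answer arrive on a single token stream, and the code separates them by splitting the accumulated text on literal marker strings. Below is a minimal, self-contained sketch of just that parsing step, assuming the same "[BEGIN FINAL RESPONSE]" / "[END FINAL RESPONSE]" markers; split_stream and the demo chunks are hypothetical names for illustration, not part of this PR.

# Sketch: incrementally split a marker-delimited stream into (thought, answer).
REASONING_END = "[BEGIN FINAL RESPONSE]"  # marker emitted before the final answer
ANSWER_END = "[END FINAL RESPONSE]"       # marker emitted after the final answer


def split_stream(chunks):
    """Yield (thought, answer) after each chunk; answer is None until the marker arrives."""
    output = ""
    for chunk in chunks:
        output += chunk
        parts = output.split(REASONING_END)
        thought = parts[0]
        answer = parts[1].replace(ANSWER_END, "") if len(parts) > 1 else None
        yield thought, answer


# The thought streams first; the answer only appears once the marker has arrived.
demo = ["Let me think.", " 1 + 1 = 2. ", REASONING_END, "The answer is 2.", ANSWER_END]
for thought, answer in split_stream(demo):
    print(repr(thought), "|", repr(answer))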
gradio_runner.py
DELETED

@@ -1,10 +0,0 @@
-import re
-import sys
-from gradio.cli import cli
-
-# This runs a gradio app so that it can be automatically reloaded in the browser
-# Example: python gradio_runner.py app.py
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(cli())
log_chat.py
DELETED

@@ -1,250 +0,0 @@
-import csv
-import os
-import time
-from datetime import datetime
-from queue import Queue
-import threading
-
-import pandas as pd
-from gradio import ChatMessage
-from huggingface_hub import HfApi, hf_hub_download
-
-from timer import Timer
-from utils import log_warning, log_info, log_debug, log_error
-
-HF_TOKEN = os.environ.get("HF_TOKEN")
-DATASET_REPO_ID = os.environ.get("APRIEL_PROMPT_DATASET")
-CSV_FILENAME = "train.csv"
-
-
-def log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, history: list[str], info: dict) -> None:
-    log_info(f"log_chat() called for chat: {chat_id}, queue size: {log_chat_queue.qsize()}, model: {model_name}")
-    log_chat_queue.put((chat_id, session_id, model_name, prompt, history, info))
-
-
-def _log_chat_worker():
-    while True:
-        chat_id, session_id, model_name, prompt, history, info = log_chat_queue.get()
-        try:
-            try:
-                _log_chat(chat_id, session_id, model_name, prompt, history, info)
-            except Exception as e:
-                log_error(f"Error logging chat: {e}")
-        finally:
-            log_chat_queue.task_done()
-
-
-def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, history: list[str], info: dict) -> bool:
-    log_info(f"_log_chat() storing chat {chat_id}")
-    if DATASET_REPO_ID is None:
-        log_warning("No dataset repo ID provided. Skipping logging of prompt.")
-        return False
-    if HF_TOKEN is None:
-        log_warning("No HF token provided. Skipping logging of prompt.")
-        return False
-
-    log_timer = Timer('log_chat')
-    log_timer.start()
-
-    # Initialize HF API
-    api = HfApi(token=HF_TOKEN)
-
-    # Check if the dataset repo exists, if not, create it
-    try:
-        repo_info = api.repo_info(repo_id=DATASET_REPO_ID, repo_type="dataset")
-        log_debug(f"log_chat() --> Dataset repo found: {repo_info.id} private={repo_info.private}")
-    except Exception:  # Create new dataset if none exists
-        log_debug(f"log_chat() --> No dataset repo found, creating a new one...")
-        api.create_repo(repo_id=DATASET_REPO_ID, repo_type="dataset", private=True)
-
-    # Ensure messages are in the correct format
-    messages = [
-        {"role": item.role, "content": item.content,
-         "type": "thought" if item.metadata else "completion"} if isinstance(
-            item, ChatMessage) else item
-        for item in history
-        if isinstance(item, dict) and "role" in item and "content" in item or isinstance(item, ChatMessage)
-    ]
-    if len(messages) != len(history):
-        log_warning("log_chat() --> Some messages in history are missing 'role' or 'content' keys.")
-
-    user_messages_count = sum(1 for item in messages if isinstance(item, dict) and item.get("role") == "user"
-                              and isinstance(item.get("content"), str))
-
-    # These must match the keys in the new row
-    expected_headers = ["timestamp", "chat_id", "turns", "prompt", "messages", "model", "session_id", "info"]
-    # Prepare new data row
-    new_row = {
-        "timestamp": datetime.now().isoformat(),
-        "chat_id": chat_id,
-        "turns": user_messages_count,
-        "prompt": prompt,
-        "messages": messages,
-        "model": model_name,
-        "session_id": session_id,
-        "info": info,
-    }
-    log_timer.add_step("Prepared new data row")
-
-    # Try to download existing CSV with retry logic
-    max_retries = 3
-    retry_count = 0
-    file_exists = False
-    csv_path = None
-    row_count = 0
-    while retry_count < max_retries:
-        try:
-            csv_path = hf_hub_download(
-                repo_id=DATASET_REPO_ID,
-                filename=CSV_FILENAME,
-                repo_type="dataset",
-                token=HF_TOKEN  # Only needed if not already logged in
-            )
-            # Only read first row to check if file is valid and get row count efficiently
-            df_check = pd.read_csv(csv_path, nrows=1)
-            file_exists = True
-            break  # Success, exit the loop
-        except Exception as e:
-            retry_count += 1
-            if retry_count < max_retries:
-                retry_delay = 2 * retry_count  # Exponential backoff: 2s, 4s, 6s, 8s
-                log_warning(
-                    f"log_chat() --> Download attempt {retry_count} failed: {e}. Retrying in {retry_delay} seconds...")
-                time.sleep(retry_delay)
-            else:
-                log_warning(f"log_chat() --> Failed to download CSV after {max_retries} attempts: {e}")
-                file_exists = False
-
-    log_timer.add_step(f"Downloaded existing CSV (attempts: {retry_count + 1})")
-
-    # Handle the case where the CSV file does not exist or is invalid
-    if file_exists:
-        # Check that the headers match our standard headers (only read first row)
-        existing_headers = pd.read_csv(csv_path, nrows=0).columns.tolist()
-        if set(existing_headers) != set(expected_headers):
-            log_warning(f"log_chat() --> CSV {csv_path} has unexpected headers: {existing_headers}. "
-                        f"\nExpected {expected_headers} "
-                        f"Will create a new one.")
-            dump_hub_csv()
-            file_exists = False
-        else:
-            log_debug(f"log_chat() --> CSV {csv_path} has expected headers: {existing_headers}")
-
-    # Write out the new row to the CSV file (append isn't working in HF container, so recreate each time)
-    log_debug(f"log_chat() --> Writing CSV file, file_exists={file_exists}")
-    try:
-        if file_exists:
-            # Append mode: copy existing file and append new row
-            # Use chunked reading to avoid loading entire file into memory
-            with open(CSV_FILENAME, "w", newline="\n") as f_out:
-                writer = csv.DictWriter(f_out, fieldnames=expected_headers)
-                writer.writeheader()
-
-                # Stream copy existing rows in chunks to minimize memory usage
-                chunk_size = 1000
-                for chunk in pd.read_csv(csv_path, chunksize=chunk_size):
-                    for _, row in chunk.iterrows():
-                        writer.writerow(row.to_dict())
-
-                # Append new row
-                writer.writerow(new_row)
-        else:
-            # Create new file with just the new row
-            with open(CSV_FILENAME, "w", newline="\n") as f:
-                writer = csv.DictWriter(f, fieldnames=expected_headers)
-                writer.writeheader()
-                writer.writerow(new_row)
-
-        log_debug(f"log_chat() --> Wrote out CSV with new row")
-        # dump_local_csv()
-    except Exception as e:
-        log_error(f"log_chat() --> Error writing to CSV: {e}")
-        return False
-
-    # Upload updated CSV
-    api.upload_file(
-        path_or_fileobj=CSV_FILENAME,
-        path_in_repo=CSV_FILENAME,
-        repo_id=DATASET_REPO_ID,
-        repo_type="dataset",
-        commit_message=f"Added new chat entry at {datetime.now().isoformat()}"
-    )
-    log_timer.add_step("Uploaded updated CSV")
-    log_timer.end()
-    log_debug("log_chat() --> Finished logging chat")
-    log_debug(log_timer.formatted_result())
-
-    return True
-
-
-def dump_hub_csv():
-    # Verify the file contents by loading it from the hub and printing it out
-    try:
-        csv_path = hf_hub_download(
-            repo_id=DATASET_REPO_ID,
-            filename=CSV_FILENAME,
-            repo_type="dataset",
-            token=HF_TOKEN  # Only needed if not already logged in
-        )
-        df = pd.read_csv(csv_path)
-        log_info(df)
-        if (df.empty):
-            # show raw contents of downloaded csv file
-            log_info("Raw file contents:")
-            with open(csv_path, 'r') as f:
-                print(f.read())
-    except Exception as e:
-        log_error(f"Error loading CSV from hub: {e}")
-
-
-def dump_local_csv():
-    # Verify the file contents by loading it from the local file and printing it out
-    try:
-        df = pd.read_csv(CSV_FILENAME)
-        log_info(df)
-    except Exception as e:
-        log_error(f"Error loading CSV from local file: {e}")
-
-
-def test_log_chat():
-    # Example usage
-    chat_id = "12345"
-    session_id = "67890"
-    model_name = "Apriel-Model"
-    prompt = "Hello"
-    history = [{"role": "user", "content": prompt}, {"role": "assistant", "content": "Hi there!"}]
-    prompt = "100 + 1"
-    history = [{'role': 'user', 'content': prompt}, ChatMessage(
-        content='Okay, that\'s a simple addition problem. , answer is 2.\n', role='assistant',
-        metadata={'title': '🧠 Thought'}, options=[]),
-        ChatMessage(content='\nThe result of adding 1 and 1 is:\n\n**2**\n', role='assistant', metadata={},
-                    options=[])
-    ]
-    info = {"additional_info": "Some extra data"}
-
-    log_debug("Starting test_log_chat()")
-    dump_hub_csv()
-    log_chat(chat_id, session_id, model_name, prompt, history, info)
-    log_debug("log_chat 1 returned")
-    log_chat(chat_id, session_id, model_name, prompt + " + 2", history, info)
-    log_debug("log_chat 2 returned")
-    log_chat(chat_id, session_id, model_name, prompt + " + 3", history, info)
-    log_debug("log_chat 3 returned")
-    log_chat(chat_id, session_id, model_name, prompt + " + 4", history, info)
-    log_debug("log_chat 4 returned")
-
-    sleep_seconds = 10
-    log_debug(f"Sleeping {sleep_seconds} seconds to let it finish and log the result.")
-    time.sleep(sleep_seconds)
-    log_debug("Finished sleeping.")
-    dump_hub_csv()
-
-
-# Create a queue for logging chat messages
-log_chat_queue = Queue()
-
-# Start the worker thread
-threading.Thread(target=_log_chat_worker, daemon=True).start()
-
-if __name__ == "__main__":
-    test_log_chat()
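Note on the deleted log_chat.py above: it decouples request handling from the slow CSV rebuild and Hub upload by pushing each record onto a Queue that a single daemon thread drains. A stripped-down sketch of that producer/consumer pattern using only the standard library; the names store, work_queue, and _worker are hypothetical stand-ins, and the real module calls _log_chat and the Hugging Face Hub instead.

import threading
from queue import Queue

work_queue: Queue = Queue()


def store(record: dict) -> None:
    # Stand-in for the slow part (_log_chat's CSV rebuild and Hub upload above).
    print(f"stored {record}")


def _worker() -> None:
    while True:
        record = work_queue.get()
        try:
            store(record)
        except Exception as e:  # one bad record must not kill the worker
            print(f"error logging record: {e}")
        finally:
            work_queue.task_done()


# daemon=True: the worker dies with the process instead of blocking shutdown.
threading.Thread(target=_worker, daemon=True).start()

work_queue.put({"chat_id": "12345", "prompt": "Hello"})
work_queue.join()  # wait for in-flight records (the real app does not block here)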
requirements.txt
CHANGED

@@ -1,5 +1,3 @@
-huggingface_hub==0.
-gradio==5.
+huggingface_hub==0.28.1
+gradio==5.29.0
 openai
-pandas~=2.3.3
-datasets~=4.1.1
styles.css
DELETED
|
@@ -1,179 +0,0 @@
|
|
| 1 |
-
:root {
|
| 2 |
-
--color-grey-50: #f9fafb;
|
| 3 |
-
--banner-background: var(--secondary-400);
|
| 4 |
-
--banner-text-color: var(--primary-200);
|
| 5 |
-
--banner-background-dark: var(--secondary-800);
|
| 6 |
-
--banner-text-color-dark: var(--primary-100);
|
| 7 |
-
--banner-text-a-color: var(--primary-100);
|
| 8 |
-
--banner-text-a-color-dark: var(--secondary-100);
|
| 9 |
-
--banner-chrome-height: calc(16px + 43px);
|
| 10 |
-
|
| 11 |
-
/* Chrome height with no banner */
|
| 12 |
-
--chat-chrome-height-wide-no-banner: 320px;
|
| 13 |
-
--chat-chrome-height-narrow-no-banner: 450px;
|
| 14 |
-
|
| 15 |
-
-    /* Use these when we are not using a banner */
-    /*--chat-chrome-height-wide: var(--chat-chrome-height-wide-no-banner);*/
-    /*--chat-chrome-height-narrow: var(--chat-chrome-height-narrow-no-banner);*/
-
-    /* When we are using a banner, add the banner height to the chat chrome height */
-    --chat-chrome-height-wide: calc(var(--chat-chrome-height-wide-no-banner) + var(--banner-chrome-height));
-    --chat-chrome-height-narrow: calc(var(--chat-chrome-height-narrow-no-banner) + var(--banner-chrome-height));
-}
-
-.banner-message {
-    background-color: var(--banner-background);
-    padding: 5px;
-    margin: 0;
-    border-radius: 5px;
-    border: none;
-}
-
-.banner-message-text {
-    font-size: 14px;
-    font-weight: bolder;
-    color: var(--banner-text-color) !important;
-}
-
-.banner-message-emoji {
-    margin-right: 3px;
-    font-size: 16px;
-}
-
-.banner-message-text a {
-    color: var(--banner-text-a-color) !important;
-}
-
-body.dark .banner-message {
-    background-color: var(--banner-background-dark) !important;
-}
-body.dark .gradio-container .contain .banner-message .banner-message-text {
-    color: var(--banner-text-color-dark) !important;
-}
-
-body.dark .gradio-container .contain .banner-message .banner-message-text a {
-    color: var(--banner-text-a-color-dark) !important;
-}
-
-.toast-body {
-    background-color: var(--color-grey-50);
-}
-
-.html-container:has(.css-styles) {
-    padding: 0;
-    margin: 0;
-}
-
-.css-styles {
-    height: 0;
-}
-
-.model-message {
-    text-align: end;
-}
-
-.model-dropdown-container {
-    display: flex;
-    align-items: center;
-    gap: 10px;
-    padding: 0;
-}
-
-.user-input-container .multimodal-textbox {
-    border: none !important;
-}
-
-/* Match the height of the modified multimodal input box on the same row */
-.control-button {
-    height: 51px;
-}
-
-button.cancel {
-    border: var(--button-border-width) solid var(--button-cancel-border-color);
-    background: var(--button-cancel-background-fill);
-    color: var(--button-cancel-text-color);
-    box-shadow: var(--button-cancel-shadow);
-}
-
-button.cancel:hover, .cancel[disabled] {
-    background: var(--button-cancel-background-fill-hover);
-    color: var(--button-cancel-text-color-hover);
-}
-
-.opt-out-message {
-    top: 8px;
-}
-
-.opt-out-message .html-container, .opt-out-checkbox label {
-    font-size: 14px !important;
-    padding: 0 !important;
-    margin: 0 !important;
-    color: var(--neutral-400) !important;
-}
-
-div.block.chatbot {
-    height: calc(100svh - var(--chat-chrome-height-wide)) !important;
-    max-height: 900px !important;
-}
-
-div.no-padding {
-    padding: 0 !important;
-}
-
-@media (max-width: 1280px) {
-    div.block.chatbot {
-        height: calc(100svh - var(--chat-chrome-height-wide)) !important;
-    }
-}
-
-@media (max-width: 1024px) {
-    .responsive-row {
-        flex-direction: column;
-    }
-
-    .model-message {
-        text-align: start;
-        font-size: 10px !important;
-    }
-
-    .model-dropdown-container {
-        flex-direction: column;
-        align-items: flex-start;
-    }
-
-    div.block.chatbot {
-        height: calc(100svh - var(--chat-chrome-height-narrow)) !important;
-    }
-}
-
-@media (max-width: 400px) {
-    .responsive-row {
-        flex-direction: column;
-    }
-
-    .model-message {
-        text-align: start;
-        font-size: 10px !important;
-    }
-
-    .model-dropdown-container {
-        flex-direction: column;
-        align-items: flex-start;
-    }
-
-    div.block.chatbot {
-        max-height: 360px !important;
-    }
-}
-
-@media (max-height: 932px) {
-    .chatbot {
-        max-height: 500px !important;
-    }
-}
-
-@media (max-height: 1280px) {
-    div.block.chatbot {
-        max-height: 800px !important;
-    }
-}
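
The deleted stylesheet sizes `div.block.chatbot` against the viewport by subtracting the chrome-height custom properties (plus the banner height). A minimal sketch of how a stylesheet like this is attached to a Gradio app, assuming the usual `gr.Blocks(css=...)` pattern; the file name and element class mirror the deleted code, not the rewritten app.py:

```python
import gradio as gr

# Load the custom stylesheet and hand it to Blocks; elem_classes ties
# components to the selectors above (e.g. div.block.chatbot).
with open("styles.css") as f:
    custom_css = f.read()

with gr.Blocks(css=custom_css) as demo:
    chatbot = gr.Chatbot(elem_classes="chatbot")

if __name__ == "__main__":
    demo.launch()
```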
theme.py
DELETED
@@ -1,148 +0,0 @@
-from typing import Iterable
-from gradio.themes import Soft
-from gradio.themes.utils import colors, fonts, sizes
-
-colors.teal_gray = colors.Color(
-    name="teal_gray",
-    c50="#e8f1f4",
-    c100="#cddde3",
-    c200="#a8c3cf",
-    c300="#7da6b8",
-    c400="#588aa2",
-    c500="#3d6e87",
-    c600="#335b70",
-    c700="#2b495a",
-    c800="#2c5364",
-    c900="#233f4b",
-    c950="#1b323c",
-)
-
-colors.red_gray = colors.Color(
-    name="red_gray",
-    c50="#f7eded",
-    c100="#f5dcdc",
-    c200="#efb4b4",
-    c300="#e78f8f",
-    c400="#d96a6a",
-    c500="#c65353",
-    c600="#b24444",
-    c700="#8f3434",
-    c800="#732d2d",
-    c900="#5f2626",
-    c950="#4d2020",
-)
-
-
-class Apriel(Soft):
-    def __init__(
-        self,
-        *,
-        primary_hue: colors.Color | str = colors.gray,
-        secondary_hue: colors.Color | str = colors.teal_gray,
-        neutral_hue: colors.Color | str = colors.slate,
-        # spacing_size: sizes.Size | str = sizes.spacing_md,
-        # radius_size: sizes.Size | str = sizes.radius_md,
-        text_size: sizes.Size | str = sizes.text_md,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("Inconsolata"),
-            "Arial",
-            "sans-serif",
-        ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
-            fonts.GoogleFont("IBM Plex Mono"),
-            "ui-monospace",
-            "monospace",
-        ),
-    ):
-        super().__init__(
-            primary_hue=primary_hue,
-            secondary_hue=secondary_hue,
-            neutral_hue=neutral_hue,
-            # spacing_size=spacing_size,
-            # radius_size=radius_size,
-            text_size=text_size,
-            font=font,
-            font_mono=font_mono,
-        )
-        super().set(
-            background_fill_primary="*primary_50",
-            background_fill_primary_dark="*primary_900",
-
-            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
-            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
-
-            button_primary_text_color="white",
-            button_primary_text_color_hover="black",
-            button_primary_background_fill="linear-gradient(90deg, *secondary_400, *secondary_400)",
-            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_300, *secondary_300)",
-            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
-            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
-
-            button_secondary_text_color="black",
-            button_secondary_text_color_hover="white",
-            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
-            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
-            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
-            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
-
-            button_cancel_background_fill=f"linear-gradient(90deg, {colors.red_gray.c400}, {colors.red_gray.c500})",
-            button_cancel_background_fill_dark=f"linear-gradient(90deg, {colors.red_gray.c700}, {colors.red_gray.c800})",
-            button_cancel_background_fill_hover=f"linear-gradient(90deg, {colors.red_gray.c500}, {colors.red_gray.c600})",
-            button_cancel_background_fill_hover_dark=f"linear-gradient(90deg, {colors.red_gray.c800}, {colors.red_gray.c900})",
-            # button_cancel_background_fill=f"linear-gradient(90deg, {colors.red.c400}, {colors.red.c500})",
-            # button_cancel_background_fill_dark=f"linear-gradient(90deg, {colors.red.c700}, {colors.red.c800})",
-            # button_cancel_background_fill_hover=f"linear-gradient(90deg, {colors.red.c500}, {colors.red.c600})",
-            # button_cancel_background_fill_hover_dark=f"linear-gradient(90deg, {colors.red.c800}, {colors.red.c900})",
-            button_cancel_text_color="white",
-            button_cancel_text_color_dark="white",
-            button_cancel_text_color_hover="white",
-            button_cancel_text_color_hover_dark="white",
-
-            # button_cancel_background_fill=colors.red.c500,
-            # button_cancel_background_fill_dark=colors.red.c700,
-            # button_cancel_background_fill_hover=colors.red.c600,
-            # button_cancel_background_fill_hover_dark=colors.red.c800,
-            # button_cancel_text_color="white",
-            # button_cancel_text_color_dark="white",
-            # button_cancel_text_color_hover="white",
-            # button_cancel_text_color_hover_dark="white",
-
-            slider_color="*secondary_300",
-            slider_color_dark="*secondary_600",
-            block_title_text_weight="600",
-            block_border_width="3px",
-            block_shadow="*shadow_drop_lg",
-            button_primary_shadow="*shadow_drop_lg",
-            button_large_padding="11px",
-
-            color_accent_soft="*primary_100",
-
-            block_label_background_fill="*primary_200",
-        )
-
-
-apriel = Apriel()
-
-# with gr.Blocks(theme=apriel) as demo:
-#     textbox = gr.Textbox(label="Name")
-#     slider = gr.Slider(label="Count", minimum=0, maximum=100, step=1)
-#     with gr.Row():
-#         button = gr.Button("Submit", variant="primary")
-#         clear = gr.Button("Clear")
-#     output = gr.Textbox(label="Output")
-#
-#
-# def repeat(name, count):
-#     time.sleep(3)
-#     return name * count
-#
-#
-# button.click(repeat, [textbox, slider], output)
-#
-# if __name__ == "__main__":
-#     demo.launch()
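
The deleted theme subclasses Gradio's `Soft` theme, registers two custom color scales (`teal_gray`, `red_gray`), and overrides fills and shadows via `super().set(...)`. A minimal sketch of applying the exported `apriel` instance, along the lines of the commented-out demo above; the buttons are illustrative:

```python
import gradio as gr
from theme import apriel

# Apply the custom Soft-derived theme; button variants pick up the
# overridden gradients (primary -> teal_gray, stop -> red_gray cancel styles).
with gr.Blocks(theme=apriel) as demo:
    gr.Button("Submit", variant="primary")
    gr.Button("Stop", variant="stop")

if __name__ == "__main__":
    demo.launch()
```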
timer.py
DELETED
@@ -1,114 +0,0 @@
-import time
-import json
-
-
-class Timer:
-    def __init__(self, name=None):
-        self.name = name
-        self.start_time = None
-        self.steps = []
-        self.total_time = None
-
-    def clear(self):
-        self.start_time = None
-        self.steps = []
-        self.total_time = None
-
-    def start(self):
-        """Start the timer."""
-        self.start_time = time.time()
-
-    def is_running(self):
-        return self.start_time is not None
-
-    def add_step(self, step_name):
-        """Add a step with its duration since the last step or start."""
-        if self.start_time is None:
-            self.start()
-
-        current_time = time.time()
-        if not self.steps:
-            elapsed = current_time - self.start_time
-        else:
-            elapsed = current_time - self.steps[-1]['timestamp']
-
-        self.steps.append({
-            "step_name": step_name,
-            "duration": round(elapsed, 4),
-            "total_duration": round(current_time - self.start_time, 4),
-            "timestamp": current_time
-        })
-
-    def end(self):
-        """End the timer and calculate the total duration."""
-        if self.start_time is None:
-            raise RuntimeError("Timer has not been started.")
-
-        if not self.steps:
-            raise RuntimeError("No steps have been added.")
-
-        self.total_time = time.time() - self.start_time
-
-    def to_json(self):
-        """Return a JSON of the timing steps."""
-        if self.total_time is None:
-            raise RuntimeError("Timer has not been ended.")
-
-        output_steps = {}
-        for step in self.steps:
-            output_steps[step["step_name"]] = step["duration"]
-
-        highlights = {"total_time": round(self.total_time, 4)}
-
-        if self.name:
-            highlights = {"name": self.name, **highlights}
-
-        output = {
-            **highlights,
-            **output_steps
-        }
-        return output
-
-    def to_json_str(self):
-        """Return a human-readable JSON of the timing steps."""
-        return json.dumps(self.to_json(), indent=4)
-
-    def formatted_result(self):
-        """Return a list of the steps, their duration, and total duration."""
-        if self.total_time is None:
-            raise RuntimeError("Timer has not been ended.")
-        line_buffer = []
-        if self.name:
-            line_buffer.append(f"Timer: {self.name}")
-        for step in self.steps:
-            line_buffer.append(f"[{step['duration']:05.2f}s, {step['total_duration']:05.2f}s] {step['step_name']}")
-        # for step in self.steps:
-        #     line_buffer.append(f"{step['step_name']}: {step['duration']:.2f}s ({step['total_duration']:.2f}s)")
-        line_buffer.append(f"Total time: {self.total_time:.2f}s")
-        return "\n".join(line_buffer)
-
-    def log_formatted_result(self):
-        print(self.formatted_result())
-
-
-def example():
-    # Example usage
-    timer = Timer()
-    timer.start()
-
-    # Simulating some steps
-    time.sleep(1)  # Simulate work for step 1
-    timer.add_step("Step 1")
-
-    time.sleep(2)  # Simulate work for step 2
-    timer.add_step("Step 2")
-
-    timer.end()
-
-    # Print the timer output
-    print(timer.formatted_result())
-    print(timer.to_json_str())
-
-
-if __name__ == "__main__":
-    example()
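
`Timer` records named steps with per-step and cumulative durations, and `end()` must be called before reading results. Beyond the bundled `example()`, a short sketch of timing a chat round-trip with it; the step names here are illustrative, not taken from app.py:

```python
import time

from timer import Timer

# Time two illustrative stages of a request with the Timer class above.
timer = Timer(name="chat_request")
timer.start()

time.sleep(0.1)               # stand-in for building the prompt
timer.add_step("build_prompt")

time.sleep(0.2)               # stand-in for streaming the model reply
timer.add_step("stream_response")

timer.end()
timer.log_formatted_result()  # per-step and total durations, one per line
print(timer.to_json_str())    # the same data as indented JSON
```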
utils.py
DELETED
@@ -1,159 +0,0 @@
-import os
-import sys
-import time
-from functools import wraps
-from typing import Any, Literal
-
-from gradio import ChatMessage
-from gradio.components.chatbot import Message
-
-COMMUNITY_POSTFIX_URL = "/discussions"
-DEBUG_MODE = False or os.environ.get("DEBUG_MODE") == "True"
-DEBUG_MODEL = False or os.environ.get("DEBUG_MODEL") == "True"
-
-models_config = {
-    "Apriel-1.6-15B-Thinker": {
-        "MODEL_DISPLAY_NAME": "Apriel-1.6-15B-Thinker",
-        "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-1.6-15b-Thinker",
-        "MODEL_NAME": os.environ.get("MODEL_NAME_APRIEL_1_6_15B"),
-        "VLLM_API_URL": os.environ.get("VLLM_API_URL_APRIEL_1_6_15B"),
-        "VLLM_API_URL_LIST": os.environ.get("VLLM_API_URL_LIST_APRIEL_1_6_15B"),
-        "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
-        "REASONING": True,
-        "MULTIMODAL": True,
-        "TEMPERATURE": 0.6,
-        "OUTPUT_TAG_START": "[BEGIN FINAL RESPONSE]",
-        "OUTPUT_TAG_END": "",
-        "OUTPUT_STOP_TOKEN": "<|end|>"
-    },
-    # "Apriel-1.5-15B-thinker": {
-    #     "MODEL_DISPLAY_NAME": "Apriel-1.5-15B-thinker",
-    #     "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-1.5-15b-Thinker",
-    #     "MODEL_NAME": os.environ.get("MODEL_NAME_APRIEL_1_5_15B"),
-    #     "VLLM_API_URL": os.environ.get("VLLM_API_URL_APRIEL_1_5_15B"),
-    #     "VLLM_API_URL_LIST": os.environ.get("VLLM_API_URL_LIST_APRIEL_1_5_15B"),
-    #     "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
-    #     "REASONING": True,
-    #     "MULTIMODAL": True,
-    #     "TEMPERATURE": 0.6,
-    #     "OUTPUT_TAG_START": "[BEGIN FINAL RESPONSE]",
-    #     "OUTPUT_TAG_END": "[END FINAL RESPONSE]",
-    #     "OUTPUT_STOP_TOKEN": "<|end|>"
-    # },
-    # "Apriel-Nemotron-15b-Thinker": {
-    #     "MODEL_DISPLAY_NAME": "Apriel-Nemotron-15b-Thinker",
-    #     "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-Nemotron-15b-Thinker",
-    #     "MODEL_NAME": os.environ.get("MODEL_NAME_NEMO_15B"),
-    #     "VLLM_API_URL": os.environ.get("VLLM_API_URL_NEMO_15B"),
-    #     "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
-    #     "REASONING": True,
-    #     "MULTIMODAL": False
-    # },
-    # "Apriel-5b": {
-    #     "MODEL_DISPLAY_NAME": "Apriel-5b",
-    #     "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-5B-Instruct",
-    #     "MODEL_NAME": os.environ.get("MODEL_NAME_5B"),
-    #     "VLLM_API_URL": os.environ.get("VLLM_API_URL_5B"),
-    #     "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
-    #     "REASONING": False,
-    #     "MULTIMODAL": False
-    # }
-}
-
-
-def get_model_config(model_name: str) -> dict:
-    config = models_config.get(model_name)
-
-    # Check for a missing entry before touching config, otherwise a None
-    # config would raise a TypeError instead of the intended ValueError.
-    if not config:
-        raise ValueError(f"Model {model_name} not found in models_config")
-    config['MODEL_KEY'] = model_name
-
-    if not config.get("MODEL_NAME"):
-        raise ValueError(f"Model name not found in config for {model_name}")
-    if not config.get("VLLM_API_URL"):
-        raise ValueError(f"VLLM API URL not found in config for {model_name}")
-
-    return config
-
-
-def _log_message(prefix, message, icon=""):
-    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
-    if len(icon) > 0:
-        icon = f"{icon} "
-    print(f"{timestamp}: {prefix} {icon}{message}")
-
-
-def log_debug(message):
-    if DEBUG_MODE is True:
-        _log_message("DEBUG", message)
-
-
-def log_info(message):
-    _log_message("INFO ", message)
-
-
-def log_warning(message):
-    _log_message("WARN ", message, "⚠️")
-
-
-def log_error(message):
-    _log_message("ERROR", message, "‼️")
-
-
-# Gradio 5.0.1 had issues with checking the message formats. 5.29.0 does not!
-def check_format(messages: Any, type: Literal["messages", "tuples"] = "messages") -> None:
-    if not DEBUG_MODE:
-        return
-
-    if type == "messages":
-        all_valid = all(
-            isinstance(message, dict)
-            and "role" in message
-            and "content" in message
-            or isinstance(message, ChatMessage | Message)
-            for message in messages
-        )
-        if not all_valid:
-            # Display which message is not valid
-            for i, message in enumerate(messages):
-                if not (isinstance(message, dict) and
-                        "role" in message and
-                        "content" in message) and not isinstance(message, ChatMessage | Message):
-                    print(f"_check_format() --> Invalid message at index {i}: {message}\n", file=sys.stderr)
-                    break
-
-            raise Exception(
-                "Data incompatible with messages format. Each message should be a dictionary with 'role' and 'content' keys or a ChatMessage object."
-            )
-        # else:
-        #     print("_check_format() --> All messages are valid.")
-    elif not all(
-        isinstance(message, (tuple, list)) and len(message) == 2
-        for message in messages
-    ):
-        raise Exception(
-            "Data incompatible with tuples format. Each message should be a list of length 2."
-        )
-
-
-# Adds timing info for a gradio event handler (non-generator functions)
-def logged_event_handler(log_msg='', event_handler=None, log_timer=None, clear_timer=False):
-    @wraps(event_handler)
-    def wrapped_event_handler(*args, **kwargs):
-        # Log before
-        if log_timer:
-            if clear_timer:
-                log_timer.clear()
-            log_timer.add_step(f"Start: {log_msg}")
-        log_debug(f"::: Before event: {log_msg}")
-
-        # Call the original event handler
-        result = event_handler(*args, **kwargs)
-
-        # Log after
-        if log_timer:
-            log_timer.add_step(f"Completed: {log_msg}")
-        log_debug(f"::: After event: {log_msg}")
-
-        return result
-
-    return wrapped_event_handler
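
These helpers resolve a model's endpoint configuration from environment variables and add timing and log lines around Gradio event handlers. A sketch of how they compose, assuming the MODEL_NAME_APRIEL_1_6_15B and VLLM_API_URL_APRIEL_1_6_15B variables are set; the handler body is illustrative:

```python
from timer import Timer
from utils import get_model_config, log_info, logged_event_handler

# Resolve the endpoint config; raises ValueError if the env vars are missing.
config = get_model_config("Apriel-1.6-15B-Thinker")
log_info(f"Serving {config['MODEL_DISPLAY_NAME']} at {config['VLLM_API_URL']}")

app_timer = Timer(name="app")


def clear_chat():
    # Illustrative non-generator handler: reset the chat history.
    return []


# Wrap the handler so each call records timing steps and debug log lines.
clear_chat = logged_event_handler(
    log_msg="clear_chat",
    event_handler=clear_chat,
    log_timer=app_timer,
    clear_timer=True,
)
```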