from __future__ import annotations
import os
from typing import Dict, List, Tuple
import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
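# `spaces` supplies the @spaces.GPU decorator used below, so the generation
# call is scheduled onto GPU hardware when hosted on Hugging Face Spaces.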
# ----------------------
# Config
# ----------------------
MODEL_ID = os.getenv("MODEL_ID", "microsoft/UserLM-8b")
DEFAULT_SYSTEM_PROMPT = (
"You are a user who wants to implement a special type of sequence. "
"The sequence sums up the two previous numbers in the sequence and adds 1 to the result. "
"The first two numbers in the sequence are 1 and 1."
)
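# The system prompt encodes the simulated user's intent; UserLM-8b generates
# user turns that reveal it gradually rather than stating it all at once.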
def load_model(model_id: str = MODEL_ID):
"""Load tokenizer and model, with a reasonable dtype and device fallback."""
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
model_id,
trust_remote_code=True,
torch_dtype="auto",
device_map="auto",
)
# Special tokens for stopping / filtering
end_token = "<|eot_id|>"
end_conv_token = "<|endconversation|>"
end_token_ids = tokenizer.encode(end_token, add_special_tokens=False)
end_conv_token_ids = tokenizer.encode(end_conv_token, add_special_tokens=False)
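    # <|eot_id|> ends a single user turn; <|endconversation|> would end the
    # whole dialogue, so generation later blocks it via bad_words_ids.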
# Guardrail 1: Problematic first tokens that cause repetition (from Appendix C.1)
problematic_tokens = ["I", "You", "Here", "i", "you", "here"]
first_token_filter_ids = []
for token in problematic_tokens:
token_ids = tokenizer.encode(token, add_special_tokens=False)
if len(token_ids) > 0:
first_token_filter_ids.append(token_ids[0])
eos_token_id = (
end_token_ids[0] if len(end_token_ids) > 0 else tokenizer.eos_token_id
)
bad_words_ids = (
[[tid] for tid in end_conv_token_ids] if len(end_conv_token_ids) > 0 else None
)
return tokenizer, model, eos_token_id, bad_words_ids, first_token_filter_ids
tokenizer, model, EOS_TOKEN_ID, BAD_WORDS_IDS, FIRST_TOKEN_FILTER_IDS = load_model()
model.eval()
# ----------------------
# Generation helper
# ----------------------
def build_messages(
system_prompt: str, history: List[Tuple[str, str]]
) -> List[Dict[str, str]]:
"""Transform Gradio history into chat template messages.
History is stored as (model_user, human_assistant) tuples.
"""
messages: List[Dict[str, str]] = []
if system_prompt.strip():
messages.append({"role": "system", "content": system_prompt.strip()})
# Each tuple is (model_user, human_assistant)
for model_user, human_assistant in history:
if model_user:
messages.append({"role": "user", "content": model_user})
if human_assistant:
messages.append({"role": "assistant", "content": human_assistant})
return messages
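# Example of the message list build_messages produces for one exchange:
#   [{"role": "system", "content": "<user intent>"},
#    {"role": "user", "content": "<model-generated user turn>"},
#    {"role": "assistant", "content": "<human-typed assistant reply>"}]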
def apply_first_token_filter(
    logits: torch.Tensor, filter_ids: List[int]
) -> torch.Tensor:
    """Manually mask problematic first tokens in a logits tensor (Guardrail 1).

    Kept for reference; `generate_reply` applies the same filter via the
    `begin_suppress_tokens` argument of `model.generate`.
    """
    logits_filtered = logits.clone()
    for token_id in filter_ids:
        # Shape (batch, seq_len, vocab): mask the next-token logits.
        logits_filtered[0, -1, token_id] = float("-inf")
    return logits_filtered
def is_valid_length(text: str, min_words: int = 3, max_words: int = 50) -> bool:
    """Check whether generated text meets the length requirements (Guardrail 3).

    The paper used max_words=25 for its simulation experiments; we use 50 in
    the interactive demo to allow slightly longer responses while still
    preventing the model from revealing its entire intent at once.
    """
    word_count = len(text.split())
    return min_words <= word_count <= max_words
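# e.g. is_valid_length("ok thanks") -> False (2 words); a 60-word reply -> False.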
def is_verbatim_repetition(
new_text: str, history: List[Tuple[str, str]], system_prompt: str
) -> bool:
"""Check if text is exact repetition of prior user turn or system prompt (Guardrail 4)."""
new_text_normalized = new_text.strip().lower()
# Check against system prompt
if new_text_normalized == system_prompt.strip().lower():
return True
# Check against previous model user messages (first element in tuple)
for model_user, _ in history:
if model_user and new_text_normalized == model_user.strip().lower():
return True
return False
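# The comparison is whitespace-trimmed and case-insensitive, so "Hello!" and
# "hello!" count as the same repeated turn.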
@spaces.GPU
def generate_reply(
messages: List[Dict[str, str]],
history: List[Tuple[str, str]],
system_prompt: str,
max_new_tokens: int = 256,
temperature: float = 1.0,
top_p: float = 0.8,
max_retries: int = 5,
) -> str:
"""Run generation with guardrails from Appendix C.1.
Implements all 4 guardrails from the paper:
1. Filter problematic first tokens
2. Optionally avoid dialogue termination (disabled by default for demo)
3. Enforce length thresholds with retry
4. Filter verbatim repetitions with retry
"""
for attempt in range(max_retries):
# Prepare input ids using the model's chat template
inputs = tokenizer.apply_chat_template(
messages,
return_tensors="pt",
add_generation_prompt=True,
).to(model.device)
with torch.no_grad():
            outputs = model.generate(
                input_ids=inputs,
                do_sample=True,
                top_p=top_p,
                temperature=temperature,
                max_new_tokens=max_new_tokens,
                eos_token_id=EOS_TOKEN_ID,
                pad_token_id=tokenizer.eos_token_id,
                bad_words_ids=BAD_WORDS_IDS,  # Guardrail 2: block <|endconversation|>
                # Guardrail 1: suppress problematic first tokens at generation start
                begin_suppress_tokens=FIRST_TOKEN_FILTER_IDS or None,
            )
# Slice off the prompt tokens to get only the new text
generated = outputs[0][inputs.shape[1] :]
text = tokenizer.decode(generated, skip_special_tokens=True).strip()
# Apply guardrails - retry if checks fail
if not is_valid_length(text):
continue
if is_verbatim_repetition(text, history, system_prompt):
continue
# Success - return the valid text
return text
    # All retries failed: surface the error in the Gradio UI
    raise gr.Error(
        f"Failed to generate a valid response after {max_retries} attempts"
    )
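# Minimal smoke test outside Gradio (a sketch; assumes the model fits on the
# available device and the call runs eagerly rather than via the Spaces queue):
#   first_turn = generate_reply(
#       build_messages(DEFAULT_SYSTEM_PROMPT, []), [], DEFAULT_SYSTEM_PROMPT
#   )
#   print(first_turn)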
# ----------------------
# Gradio UI callbacks
# ----------------------
def respond(
assistant_message: str,
chat_history: List[Tuple[str, str]],
system_prompt: str,
max_new_tokens: int,
temperature: float,
top_p: float,
):
"""Generate next user turn.
Flow:
- If history empty: Generate first user message (ignores assistant_message input)
- If history exists: Add assistant response and generate next user turn
History format: (model_user, human_assistant)
"""
# First message generation - ignore any text in the assistant box
if len(chat_history) == 0:
# Generate initial user message from system prompt alone
messages = build_messages(system_prompt, [])
user_reply = generate_reply(
messages,
chat_history,
system_prompt,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_p=top_p,
)
# Start conversation with first user message (empty assistant slot)
chat_history = [(user_reply, None)]
return chat_history, chat_history
# Subsequent messages - require assistant response
if not assistant_message.strip():
# User clicked generate without providing assistant response
gr.Info(
"Please type your assistant response before generating the next user message."
)
return chat_history, chat_history
# Update the last tuple with the assistant response
last_model_user, _ = chat_history[-1]
chat_history[-1] = (last_model_user, assistant_message.strip())
# Build messages for next user turn generation
messages = build_messages(system_prompt, chat_history)
user_reply = generate_reply(
messages,
chat_history,
system_prompt,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_p=top_p,
)
# Add new model user message (with empty assistant slot)
chat_history.append((user_reply, None))
return chat_history, chat_history
def clear_state():
return [], DEFAULT_SYSTEM_PROMPT
# ----------------------
# Build the Gradio App
# ----------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
gr.Markdown(
f"""
# UserLM-8b: User Language Model Demo

**Model:** `{MODEL_ID}`

The model plays the *user*; you play the *assistant*. Type assistant replies below and the model generates the next user turn.
"""
)
with gr.Row():
system_box = gr.Textbox(
label="User Intent",
value=DEFAULT_SYSTEM_PROMPT,
lines=3,
placeholder="Enter the user's goal or intent",
)
chatbot = gr.Chatbot(
height=420,
label="Conversation",
)
with gr.Row():
msg = gr.Textbox(
label="Assistant Response",
placeholder="Leave empty for first generation, then type your responses",
lines=2,
)
with gr.Accordion("Generation Settings", open=False):
max_new_tokens = gr.Slider(16, 512, value=256, step=16, label="max_new_tokens")
temperature = gr.Slider(0.0, 2.0, value=1.0, step=0.05, label="temperature")
top_p = gr.Slider(0.0, 1.0, value=0.8, step=0.01, label="top_p")
with gr.Row():
submit_btn = gr.Button("Generate", variant="primary")
clear_btn = gr.Button("Clear")
state = gr.State([]) # chat history: List[Tuple[model_user, human_assistant]]
with gr.Accordion("Implementation Details", open=False):
gr.Markdown(
"""
Based on Appendix C.1 of the UserLM paper:

- Sampling: temperature=1.0, top_p=0.8
- First-token filtering for problematic openers ("I", "You", "Here", ...)
- Length constraints: 3-50 words, with retries
- Verbatim-repetition filtering, with retries
"""
)
    def _submit(asst_text, history, system_prompt, mnt, temp, tp):
        _, visible = respond(asst_text, history, system_prompt, mnt, temp, tp)
        # Clear the input box after submission
        return "", visible
submit_btn.click(
fn=_submit,
inputs=[msg, state, system_box, max_new_tokens, temperature, top_p],
outputs=[msg, chatbot],
)
msg.submit(
fn=_submit,
inputs=[msg, state, system_box, max_new_tokens, temperature, top_p],
outputs=[msg, chatbot],
)
# Keep state in sync with the visible Chatbot
def _sync_state(chat):
return chat
chatbot.change(_sync_state, inputs=[chatbot], outputs=[state])
def _clear():
history, sys = clear_state()
return history, sys, history, ""
clear_btn.click(_clear, outputs=[state, system_box, chatbot, msg])
if __name__ == "__main__":
demo.queue().launch()