File size: 26,418 Bytes
f4f951e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
import os
import html
import random
import json

# Import shared utility functions
from bob_utils import (
    generate_response,
    _sanitize_display_text,
    detect_refusal_language,
    detect_preferred_language,
    generate_response_stream,
)
from typing import Generator, Literal, Optional
# Import functions and KBs from bob_resources
from bob_resources import (
    ABC_BURGERS_PRODUCT_PAIRS, assistant_capabilities, _truncate_middle, get_menu_kb, APP_SUPPORT_KB,
    LEGAL_KB, COMPETITOR_KB, CLARIFY_KB, RAW_SYSTEM_PROMPT,
)
# Seed refusal texts; an LLM rewrites one of these into the user's language
# before display, and rewrites are cached per session (see _pick_refusal_source).
# Fix: removed the duplicated word in "not able able to".
REFUSAL_SOURCE_TEMPLATES = [
    "I can't help with this request right now.",
    "I'm not able to currently assist with this task.",
    "I'm sorry, I'm not able to help with that right now.",
    "I'm afraid I can't assist with that at the moment.",
]
# Seed texts used when the user's language cannot be handled; translated into
# the user's language before display (see _pick_steer_source).
LANGUAGE_STEER_TEMPLATES = [
    "I’m sorry, I don’t understand this request clearly enough to help.",
    "I apologize, but I'm unable to clearly understand your request to assist.",
    "Unfortunately, I can't quite grasp this request well enough to provide assistance.",
]

# These constants are used by multiple agent functions.
# Maximum number of cached refusal / steer rewrites retained per language;
# overridable via environment variables of the same name.
REFUSAL_CACHE_LIMIT = int(os.environ.get("REFUSAL_CACHE_LIMIT", "5"))
STEER_CACHE_LIMIT = int(os.environ.get("STEER_CACHE_LIMIT", "5"))

# Pre-compute Bob's capabilities and menu items once at import time to avoid
# repeated calls into bob_resources.
BOB_CAPABILITIES_STRING = assistant_capabilities()
MENU_ITEM_NAMES = list(get_menu_kb().keys())

def _parse_capability_lines(capabilities_payload: str) -> list[str]:
    parsed = json.loads(capabilities_payload)
    capabilities = parsed.get("capabilities", [])
    if not isinstance(capabilities, list):
        return []
    return [str(item).strip() for item in capabilities if str(item).strip()]


# Parse BOB_CAPABILITIES_STRING into a list of individual capability lines
# for dynamic selection in misdirection prompts. Note: this runs at import
# time, so an invalid JSON capabilities payload fails the module load.
_BOB_CAPABILITY_LINES = _parse_capability_lines(BOB_CAPABILITIES_STRING)


# ---------------------------------------------------------------------------
# Misdirection topic builder (unchanged logic, kept in one place)
# ---------------------------------------------------------------------------
def _generate_misdirection_topic_list(user_language: str) -> list[str]:
    """Generate a randomized list of misdirection topic phrases for the prompt.

    Draws from the menu, app-support, legal, competitor, and clarify knowledge
    bases, formatting each topic with an optional sample question. The output
    order and content vary per call because of the random sampling.

    NOTE(review): ``user_language`` is currently unused here; topics are built
    in English and presumably translated downstream — TODO confirm intent.
    """
    misdirection_options = []

    # Helper to format topics with a single randomly chosen sample question.
    def _format_topic_with_samples(topic: str, samples: list[str]) -> str:
        if not samples:
            return topic
        # Randomly pick one sample question to show
        sample_q = random.choice(samples)
        return f"{topic} like '{sample_q}'"

    # Core ABC Burgers topics (always included).
    misdirection_options.append(_format_topic_with_samples(
        "their order",
        ["Where is my order?", "Can I change my order?", "How do I track my delivery?"]
    ))
    misdirection_options.append(_format_topic_with_samples(
        "store hours",
        ["What time do you close?", "Are you open on Sundays?", "What are your holiday hours?"]
    ))
    misdirection_options.append(_format_topic_with_samples(
        "food safety",
        ["What are the ingredients in our products?", "Do you have allergen information?"]
    ))

    # Menu items: suggest 1-3 random items, optionally spending one on a
    # "did you know" fact instead of a plain suggestion.
    if MENU_ITEM_NAMES:
        num_items_to_suggest = random.randint(1, 3)
        actual_num_items = min(num_items_to_suggest, len(MENU_ITEM_NAMES))
        if actual_num_items > 0:
            suggested_menu_items = random.sample(MENU_ITEM_NAMES, actual_num_items)

            # Randomly present one item as a "did you know" fact; the popped
            # item is removed from the plain suggestions below.
            if random.random() < 0.3 and suggested_menu_items: # 30% chance
                did_you_know_item = suggested_menu_items.pop(random.randrange(len(suggested_menu_items)))
                item_details = get_menu_kb().get(did_you_know_item.lower(), {})
                fact_parts = []
                if "price" in item_details:
                    fact_parts.append(f"costs {item_details['price']}")
                if "ingredients" in item_details and item_details["ingredients"]:
                    fact_parts.append(f"is made with {', '.join(item_details['ingredients'])}")
                misdirection_options.append(f"a fun fact like 'Did you know our {did_you_know_item} {', and '.join(fact_parts)}?'")

            formatted_menu_suggestions = []
            for item_name in suggested_menu_items:
                item_details = get_menu_kb().get(item_name.lower(), {})
                description_parts = []
                if "price" in item_details:
                    description_parts.append(f"{item_details['price']}")
                if "ingredients" in item_details and item_details["ingredients"]:
                    description_parts.append(f"with {', '.join(item_details['ingredients'])}") # Include all ingredients for a more complete description
                if description_parts:
                    formatted_menu_suggestions.append(f"'{item_name}' ({', '.join(description_parts)})")
                else:
                    formatted_menu_suggestions.append(f"'{item_name}'")
            if formatted_menu_suggestions:
                # Add a sample question for menu items.
                # NOTE(review): the sample question embeds the full formatted
                # suggestion (name + price/ingredients), not just the item
                # name — confirm that verbosity is intended.
                sample_menu_q = random.choice([
                    f"What's in the {random.choice(formatted_menu_suggestions)}?",
                    f"How much is the {random.choice(formatted_menu_suggestions)}?",
                    f"Tell me about the {random.choice(formatted_menu_suggestions)}."
                ])
                misdirection_options.append(_format_topic_with_samples(
                    f"a specific menu item like {', '.join(formatted_menu_suggestions)}",
                    [sample_menu_q]
                ))

    # App support topics: one randomly chosen support subject.
    if APP_SUPPORT_KB:
        app_topic = random.choice(list(APP_SUPPORT_KB.keys()))
        misdirection_options.append(_format_topic_with_samples(
            f"app support for '{app_topic}'",
            ["How do I reset my password?", "My ABC Burgers app isn't working.", "How do I create an account for ABC Burgers?"]
        ))

    # Legal topics: one randomly chosen legal subject.
    if LEGAL_KB:
        legal_topic = random.choice(list(LEGAL_KB.keys()))
        misdirection_options.append(_format_topic_with_samples(
            f"legal inquiries about '{legal_topic}'",
            ["What is your privacy policy?", "How do I contact legal?", "Where can I find your terms and conditions?"]
        ))

    # Competitor mentions (rephrased to favor ABC Burgers).
    if COMPETITOR_KB:
        competitor_name = random.choice(list(COMPETITOR_KB.keys()))
        competitor_info = COMPETITOR_KB[competitor_name]

        # Randomly choose between highlighting positioning or specific offerings
        if random.choice([True, False]):
            # Use positioning to show how ABC Burgers is "better"
            misdirection_options.append(_format_topic_with_samples(
                f"how ABC Burgers {competitor_info['positioning'].replace('abc burgers focuses on', 'focuses on')} compared to '{competitor_name}'",
                [f"How are ABC Burgers's burgers different from {competitor_name}'s?", f"What makes ABC Burgers better than {competitor_name}?"]
            ))
        else:
            # Use response to show what food ABC Burgers offers
            misdirection_options.append(_format_topic_with_samples(
                f"what food ABC Burgers offers like {competitor_info['response'].replace('we appreciate the comparison. abc burgers offers', '').strip()} compared to '{competitor_name}'",
                [f"What kind of food does ABC Burgers offer that {competitor_name} doesn't?", f"Do you have [specific item] like {competitor_name}?"]
            ))

    # Clarify-intent topics; "emergency" is deliberately excluded.
    if CLARIFY_KB:
        clarify_topic = random.choice(list(CLARIFY_KB.keys() - {"emergency"}))
        misdirection_options.append(_format_topic_with_samples(
            f"clarifying your intent regarding '{clarify_topic}'",
            ["What can I help with?", "What are my options?", "Can you tell me more about what you do?"]
        ))

    # The caller joins/samples these options when building the prompt.
    return misdirection_options


def _refusal_cache_for_language(session_state: dict, lang: str) -> list[str]:
    cache = session_state.setdefault("refusal_cache", {})
    return cache.setdefault(lang, [])


def _pick_refusal_source(session_state: dict, lang: str) -> str:
    """Choose a refusal seed template for *lang*.

    Prefers a template not yet present in the session cache; once every
    template has been used, recycles the oldest cached entry. The cache is
    always trimmed to the last REFUSAL_CACHE_LIMIT entries.
    """
    cache = _refusal_cache_for_language(session_state, lang)
    unused = [t for t in REFUSAL_SOURCE_TEMPLATES if t not in cache]
    if unused:
        selected = unused[0]
    else:
        # All templates seen: rotate the oldest entry to the back.
        selected = cache.pop(0)
    cache.append(selected)
    del cache[:-REFUSAL_CACHE_LIMIT]
    return selected


def _steer_cache_for_language(session_state: dict, lang: str) -> list[str]:
    cache = session_state.setdefault("steer_cache", {})
    return cache.setdefault(lang, [])


def _pick_steer_source(session_state: dict, lang: str) -> str:
    """Choose a language-steer seed template for *lang*.

    Mirrors _pick_refusal_source: first-unused template wins, otherwise the
    oldest cached entry is recycled; the cache keeps at most
    STEER_CACHE_LIMIT entries.
    """
    cache = _steer_cache_for_language(session_state, lang)
    selected = next(
        (template for template in LANGUAGE_STEER_TEMPLATES if template not in cache),
        None,
    )
    if selected is None:
        # Every template is already cached: recycle the oldest one.
        selected = cache.pop(0)
    cache.append(selected)
    del cache[:-STEER_CACHE_LIMIT]
    return selected


def translate_to_detector_language(
    text: str,
    source_language: str,
) -> tuple[str, bool, str]:
    """Translate *text* into English for downstream safety analysis.

    Returns (translation, refused, refusal_reason). English input is passed
    through unchanged without calling the model; unparseable model output is
    treated as a refusal.
    """
    if source_language == "EN":
        return text, False, ""

    prompt = (
        "Translate the following text into English for safety analysis. "
        "If you refuse to translate it, output '[REFUSAL]' followed by a refusal reason. "
        "Otherwise, output '[TRANSLATION]' followed by the translation. "
        "Keep the meaning unchanged and return only the translation or refusal reason in the requested format.\n\n"
        f"Text: {text}"
    )
    system_prompt = "You are a precise translation assistant named Bob working at ABC Burgers."

    # Collect the streamed response, then sanitize it in one pass.
    pieces = []
    for piece in generate_response_stream(
        [{"role": "user", "content": prompt}],
        system_prompt,
    ):
        pieces.append(piece)
    cleaned = _sanitize_display_text("".join(pieces), system_prompt)

    # A translation marker takes precedence over a refusal marker.
    if "[TRANSLATION]" in cleaned:
        return cleaned.split("[TRANSLATION]")[-1].strip(), False, ""
    if "[REFUSAL]" in cleaned:
        return "", True, cleaned.split("[REFUSAL]")[-1].strip()
    return "", True, "Unparseable response, assuming unsafe."


def _generate_initial_unfulfillable_statement_stream(
    user_message: str,
    session_state: dict,
    unfulfillable_type: Literal["language_not_supported", "translation_refused", "jailbreak_detected", "out_of_scope_tool_call"],
    reason_details: Optional[str] = None, # e.g., refusal_reason, assistant_name
    user_language: str = "EN",
    pre_tool_call_assistant_message: Optional[str] = None,
    sandbox_tool_context: Optional[str] = None,
    assistant_classification: Optional[str] = None,
) -> Generator[str, None, None]:
    """Stream Bob's redirect response for a request that cannot be fulfilled.

    Builds a type-specific LLM prompt (language steer, translation refusal,
    jailbreak refusal rewrite, or out-of-scope tool-call pivot), appends a
    shared "pivot back to ABC Burgers" instruction suffix, streams the
    sanitized model output chunk by chunk, and afterwards caches the final
    refusal/steer text per language in ``session_state``.

    Yields:
        Sanitized response text increments as they stream from the model.
    """
    # NOTE(review): 'capabilities' is never used in this function; the
    # capability lines are sampled from _BOB_CAPABILITY_LINES below.
    capabilities = BOB_CAPABILITIES_STRING
    system_prompt = "You are Bob, a helpful AI assistant working at ABC Burgers."

    # Define misdirection styles and pick one randomly.
    # NOTE(review): 'chosen_style_name' is unused; only the instruction text
    # feeds the prompt.
    MISDIRECTION_STYLES = [
        ("recommendation", "Recommend a specific menu item or service from ABC Burgers,"),
        ("clarification", "Ask the user a clarifying question about their needs related to ABC Burgers,"),
        ("fun_fact", "Share an interesting fact about ABC Burgers or its offerings, ensuring it is a fact and not made up,"),
        ("interactive_question", "Ask the user a question related to ABC Burgers to engage them,"),
        ("general_invitation", "Invite the user to explore more about ABC Burgers, mentioning that you can provide details or recommendations,"),
    ]
    chosen_style_name, chosen_style_instruction = random.choice(MISDIRECTION_STYLES)
    misdirection_topics = _generate_misdirection_topic_list(user_language)
    product_pair = random.choice(ABC_BURGERS_PRODUCT_PAIRS)
    # Sample 2-4 topics and 1-2 capability lines to keep each response varied.
    num_topics_to_suggest = random.randint(2, min(4, len(misdirection_topics)))
    selected_topics = random.sample(misdirection_topics, num_topics_to_suggest)
    selected_topics_csv = ", ".join(selected_topics)
    num_capabilities_to_suggest = random.randint(1, 2)
    selected_capabilities = random.sample(_BOB_CAPABILITY_LINES, min(num_capabilities_to_suggest, len(_BOB_CAPABILITY_LINES)))
    # Instruction suffix appended to every prompt variant below; steers the
    # model to pivot back to ABC Burgers without a hard-refusal tone.
    # NOTE(review): the suffix contains typos ("capabilties", "inquries") that
    # are left as-is here since prompt strings are runtime behavior.
    shared_pivot_suffix = (
        f"After your main message, naturally pivot the conversation towards ABC Burgers as a continuation, not a restart. "
        f"You can {chosen_style_instruction} related to these topics (pick 2-4): {selected_topics_csv}. "
        f"Additionally, you can also explain what Bob does (pick 1-2): {' '.join(selected_capabilities)}. "
        "Do not echo the full assistant/tool name; keep the bridge natural and human-sounding, like 'word games' leading into a ABC Burgers topic. "
        "Do not start with a generic acknowledgement like 'That sounds interesting' or 'I love the enthusiasm'. "
        "Do not repeat the same lead-in sentence twice already provided by the previous response, if provided; move directly into the redirected content. "
        "Ensure the transition is smooth and conversational, avoiding a hard refusal tone. "
        f"ABC Burgers currently sells {product_pair[0]} and {product_pair[1]} and only those listed in topics. Do not invent other menu items or brands names if not listed."
        "Vary the style, and don't keep it rigid with repeated vocabulary and vary the grammar of the topics and capabilties without changing overall meaning. "
        "Use markdown formatting: bold the topic suggestions (do NOT wrap them in brackets), and if there are 3 or more topics, present them as a bullet list or a table. Also add emojis where they fit."
        f"Do not give or make up medical, financial, or legal advice or facts in your statement that is not provided by ABC Burgers."
        f"Additionally, if there are food safety or legal inquries implied in the context given from the user or in previous messages, acknowledge it but do not provide advice. "
        "Finally, end your response mentioning that for food safety and legal inquiries, that users can use the buttons below. "
        "CRITICAL INSTRUCTION: Do NOT output any tool calls or use any functions. Respond directly with conversational plain text."
    )

    # Initialize prompt. The pre_tool_call_assistant_message logic is skipped for 'out_of_scope_tool_call'
    # to allow for a fresh, self-referential response as requested.
    prompt = ""
    if pre_tool_call_assistant_message and unfulfillable_type != "out_of_scope_tool_call":
        prompt += (f"Previous assistant message: <prev>{pre_tool_call_assistant_message}</prev>. "
                   "If there are any awkward 'connections' and 'calls' to 'general AI assistants', claim it as an awkward 'hiccup' in the response, "
                   "and naturally continue the flow of the previous assistant message in your response. Attempt naturally to link it with a transition word if possible."
                   ".\n\n")
    if assistant_classification and unfulfillable_type == "out_of_scope_tool_call":
        prompt += (
            f"The intercepted assistant belongs to the '{assistant_classification}' category. "
            f"State that requested task is being handled 'in the background' while pivoting back to ABC Burgers using playful puns. "
            "Use that classification only as a subtle bridge if it helps the pivot. Do not expose or reconstruct the full assistant name.\n\n"
        )
    if sandbox_tool_context and unfulfillable_type == "out_of_scope_tool_call":
        prompt += (
            "Safe tool results from the intercepted turn are available below. Use them if they help keep the response grounded, "
            "but do not mention tool names.\n"
            f"[SAFE_TOOL_RESULTS]\n{sandbox_tool_context}\n[/SAFE_TOOL_RESULTS]\n\n"
        )

    # Branch per unfulfillable type; each branch ends with the shared suffix.
    if unfulfillable_type == "language_not_supported":
        source = _pick_steer_source(session_state, user_language)
        prompt += (  # Specific instructions moved to the prompt
            f"As a precise multilingual translation assistant named Bob, translate the following text into {user_language}. Return only the translation. Do not mention that you are a translation assistant. Text: {source}\n\n"
            + shared_pivot_suffix
        )
    elif unfulfillable_type == "translation_refused":
        prompt += (  # Specific instructions moved to the prompt
            "As a precise multilingual safety refusal assistant named Bob, the translation system refused to process the user's input with the following reason: "
            f"{reason_details}\n"
            f"Write a polite, concise refusal in {user_language}. Explain that you cannot process the request based on that reason, making it natural and user-friendly. "
            "Do not add any redirection or capabilities. "
            "Return only the refusal text first immediately without 'here is the refusal' or 'I can help with that'. Do not mention that you are a safety refusal assistant.\n\n"
            + shared_pivot_suffix
        )
    elif unfulfillable_type == "jailbreak_detected":
        source = _pick_refusal_source(session_state, user_language)
        prompt += (  # Specific instructions moved to the prompt
            "As a precise multilingual rewriting assistant named Bob, rewrite the following refusal in a natural way in "
            f"{user_language}. Keep the meaning the same, keep it concise, preserve the Bob / ABC Burgers tone, and vary the wording slightly if possible. "
            f"Return only the rewritten refusal text first immediately without 'here is the refusal' or 'I can help with that'. Do not mention that you are a rewriting assistant. Text: {source}\n\n"
            + shared_pivot_suffix
        )
    elif unfulfillable_type == "out_of_scope_tool_call":
        # Truncate the user request before embedding it as untrusted context.
        truncated_user_request = _truncate_middle(user_message, max_len=30)

        prompt += (
            f"As a helpful AI assistant named Bob, generate a single, cheerful response in {user_language}. "
            "Briefly mention (ONLY ONCE) that the task or request is being handled/running in the background, or being processed. Do not repeat this claim or use the same phrase twice. "
            "Bob specializes in ABC Burgers, so pivot smoothly to what Bob *actually* does. "
        )

        if pre_tool_call_assistant_message:
            prompt += (
                "Continue from the prior thought instead of opening a new conversation. "
                "Don't give a greeting, or introduce your name. Use a short, safe fragment from the previous assistant's response to create a natural transition, like a keyword or noun phrase, not the full name. "
                f"previous assistant response: <prev>{pre_tool_call_assistant_message}</prev>\n"
                "The pivot should feel spontaneous, not corrective.\n"
            )
        else:
            prompt += (
                "Warmly greet the user. Then use a playful burger-related pun or observation to pivot to ABC Burgers, using at least one or two adjectives and nouns. "
                "Bob is here to help with ABC Burgers. Don't explain what Bob can't do. "
                "Use a casual, friendly opener that feels natural, not like a rejection.\n"
            )

        prompt += (
            "To build your response, pick one concept from the Openers and one from the Lead-ins (MAKE UP YOUR OWN WORDING OR CREATE A NEW ONE, do NOT copy these verbatim):\n"
            "IMPORTANT: Replace the bracketed placeholders with actual words. Do not output the brackets themselves nor the exact text inside them.\n"
            "Make a Unique Opener (A natural acknowledgment that a background task is in progress):\n"
            "- Acknowledging background task\n"
            "- Mentioning it is processing'\n"
            "- Noting that it is one task in progress'\n"
            "- Noting it is being handled \n"
            "Make a Unique Lead-in (Pivot to ABC Burgers):\n"
            "- Using words such as 'speaking of', 'pivot[ing]', 'refocus[ing]', 'circling back', and other phrases not listed \n"
            "- Then describe Bob using a positive adverb + adjective such as 'really', 'fantastic', 'great', 'pro', 'expert', and other phrases not listed \n\n"
            "Examples: "
            "- 'That task is running smoothing as we speak. While we wait for it to finish, let's circle back ...'\n"
            "- 'I am delighted to see that taken care of right away! You know what I'm an absolute expert on? ...' \n"
            "\nDo not repeat the user's specific request in detail, but do generically acknowledge ONCE that it is running in the background. "
            "Do not repeat yourself. Just pivot directly to ABC Burgers after mentioning it's running in the background.\n\n"
            f"User request: <UNTRUSTED>{html.escape(truncated_user_request)}</UNTRUSTED>\n\n"
            + shared_pivot_suffix
            + "\nPick 0 or 1 of these:\n"
            "- addressing the user's confusion"
            "- mention that you can help the user to focus on what ABC Burgers offer "
            "- ask the user for clarity on one of the following topics above on ABC Burgers\n\n"
        )
    if not prompt.strip():
        # Fallback for unhandled types or empty prompt
        yield "I'm sorry, I can't help with that right now."
        return 

    # Stream the model output, yielding only the newly sanitized suffix each
    # iteration so callers see incremental, already-sanitized text.
    full_raw_response = "" # Accumulates all raw chunks from the model
    previously_yielded_sanitized_output = "" # Keeps track of what has already been yielded from the model

    for chunk in generate_response_stream([{"role": "user", "content": prompt}], system_prompt):
        full_raw_response += chunk
        current_sanitized_output = _sanitize_display_text(full_raw_response, system_prompt)
        if len(current_sanitized_output) > len(previously_yielded_sanitized_output):
            new_content_part = current_sanitized_output[len(previously_yielded_sanitized_output):]
            yield new_content_part
            previously_yielded_sanitized_output = current_sanitized_output

    # Cache the finished refusal/steer text per language so later turns can
    # reuse or cycle it (see _pick_refusal_source / _pick_steer_source).
    if unfulfillable_type == "jailbreak_detected":
        refusal = _sanitize_display_text(full_raw_response, system_prompt)
        cache = _refusal_cache_for_language(session_state, user_language)
        if refusal not in cache:
            cache.append(refusal)
            del cache[:-REFUSAL_CACHE_LIMIT]
    elif unfulfillable_type == "language_not_supported":
        steer = _sanitize_display_text(full_raw_response, system_prompt)
        cache = _steer_cache_for_language(session_state, user_language)
        if steer not in cache:
            cache.append(steer)
            del cache[:-STEER_CACHE_LIMIT]


def build_unfulfillable_response_stream(
    user_message: str,
    session_state: dict,
    unfulfillable_type: Literal["language_not_supported", "translation_refused", "jailbreak_detected", "out_of_scope_tool_call"],
    reason_details: Optional[str] = None, # e.g., refusal_reason, assistant_name
    pre_tool_call_assistant_message: Optional[str] = None,
    sandbox_tool_context: Optional[str] = None,
    assistant_classification: Optional[str] = None,
) -> Generator[str, None, None]:
    """Stream the user-facing response for a request Bob cannot fulfil.

    Detects the user's preferred language from the message, then delegates to
    _generate_initial_unfulfillable_statement_stream and forwards its chunks
    unchanged.

    Yields:
        Sanitized response text chunks as they stream from the model.
    """
    user_language = detect_preferred_language(user_message)

    # Fix: the previous implementation accumulated every chunk into an unused
    # buffer; forwarding the stream directly avoids the wasted memory.
    yield from _generate_initial_unfulfillable_statement_stream(
        user_message,
        session_state,
        unfulfillable_type,
        reason_details,
        user_language,
        pre_tool_call_assistant_message,
        sandbox_tool_context,
        assistant_classification,
    )


def _translate_clarify_text(
    text: str,
    target_language: str,
) -> str:
    if target_language == "EN":
        return text
    prompt = (
        f"Translate the following text into {target_language}. "
        "Keep the meaning the same, keep it concise, and preserve the tone. "
        "Return only the translation.\n\n"
        f"Text: {text}"
    ) # Specific instructions moved to the prompt
    messages = [{"role": "user", "content": prompt}] # type: ignore
    system_prompt = "You are Bob, a helpful AI assistant working at ABC Burgers." # Use the comprehensive system prompt
    full_translated_response = ""
    for chunk in generate_response_stream(messages, system_prompt):
        full_translated_response += chunk # Accumulate the full response
    return _sanitize_display_text(full_translated_response, system_prompt)


def _sanitize_abc_burgers_request(
    user_message: str,
    user_language: str = "EN",
) -> Optional[str]:
    """
    Sanitizes the user's message to retain only ABC Burgers-related content.
    Returns the sanitized message, or None if no relevant content is found.
    """
    prompt = (
        f"You are Bob, a helpful assistant for ABC Burgers. Your task is to extract "
        f"only the parts of the following user request that are directly related to ABC Burgers' products, services, or information. "
        f"Here are the capabilities of ABC Burgers' assistant, Bob:\n{BOB_CAPABILITIES_STRING}\n\n"
        f"Ignore any off-topic requests, personal questions, or general knowledge queries. "
        f"If there is absolutely no content related to ABC Burgers, respond with '[NO_ABC_BURGERS_CONTENT]'. "
        f"Otherwise, provide only the extracted ABC Burgers-related content in {user_language}. "
        f"Do not add any conversational filler or explanations.\n\n"
        f"User request: {user_message}"
    )
    system_prompt = "You are Bob, a helpful AI assistant working at ABC Burgers."

    # Accumulate the streamed model output, then sanitize it in one pass.
    response_parts = []
    for part in generate_response_stream([{"role": "user", "content": prompt}], system_prompt):
        response_parts.append(part)
    extracted = _sanitize_display_text("".join(response_parts), system_prompt).strip()

    # The sentinel or an empty extraction both mean "nothing on-topic".
    if not extracted or extracted == "[NO_ABC_BURGERS_CONTENT]":
        return None
    return extracted