anaspro committed
Commit: 5e51f7c
Parent(s): 55612d9
update

app.py CHANGED
@@ -23,7 +23,7 @@ MAX_INPUT_TOKENS = int(os.getenv("MAX_INPUT_TOKENS", "32_000"))
 
 @spaces.GPU()
 @torch.inference_mode()
-def generate(message: dict, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512, enable_thinking: bool = True) -> Iterator[str]:
+def generate(message: str | dict, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512, enable_thinking: bool = True) -> Iterator[str]:
     # Build messages for Qwen3 (text-only format)
     messages = []
     if system_prompt:
@@ -47,12 +47,16 @@ def generate(message: dict, history: list[dict], system_prompt: str = "", max_ne
         content = item["content"]
         if isinstance(content, str):
             messages.append({"role": "user", "content": content})
-
-        #
-        messages.append({"role": "user", "content":
+        elif isinstance(content, dict):
+            # Handle dict format
+            messages.append({"role": "user", "content": content.get("text", "")})
 
     # Add current user message
-
+    # Handle both string and dict message formats
+    if isinstance(message, str):
+        current_message = message
+    else:
+        current_message = message.get("text", "")
     messages.append({"role": "user", "content": current_message})
 
     # Apply chat template with enable_thinking parameter
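In short: generate() previously assumed the incoming message was a dict, and the new version also accepts a plain string, pulling the text out of dict-shaped payloads via .get("text", ""). Below is a minimal, self-contained sketch of that normalization pattern. The "text" key comes from the diff; treating the dict as a Gradio multimodal payload of the form {"text": ..., "files": [...]} is an assumption, not something the commit states.

# Sketch of the str-or-dict normalization the commit introduces.
# Assumed dict shape: {"text": ..., "files": [...]}, as produced by a
# Gradio multimodal chat input; only the "text" key appears in the diff.
def normalize_text(message: str | dict) -> str:
    # Plain strings pass through unchanged.
    if isinstance(message, str):
        return message
    # Dict payloads contribute their "text" field; a missing key yields "".
    return message.get("text", "")

print(normalize_text("hello"))                         # hello
print(normalize_text({"text": "hello", "files": []}))  # hello

Both input shapes thus reduce to the same {"role": "user", "content": ...} entry, which is why the diff applies the same pattern twice: once for dict-shaped history items and once for the current message.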