add: LLM output debug script
Browse files- test_llm.py +16 -0
test_llm.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Debug script: dump the raw output of a single VLLMClient chat call."""
import asyncio
import sys

# Make the project package importable regardless of the working directory.
sys.path.insert(0, '/workspace/teememo-synth')

from client import VLLMClient


async def test():
    """Send one fixed Japanese prompt and print the raw reply (first 500 chars).

    Uses VLLMClient as an async context manager so its underlying
    connection/session is cleaned up even if the request fails.
    """
    async with VLLMClient() as client:
        result = await client.chat_single(
            messages=[{'role': 'user', 'content': '日本の中高生の悩みを3件、JSON配列のみで出力してください。'}],
            max_tokens=1024,
            # NOTE(review): presumably suppresses the model's "thinking" /
            # reasoning stream so only the final answer is returned — confirm
            # against VLLMClient.chat_single.
            enable_thinking=False,
        )
        print('RAW OUTPUT:')
        # repr() makes escapes/whitespace visible; the slice caps very long
        # replies so the terminal isn't flooded.
        print(repr(result[:500]))


if __name__ == '__main__':
    # Guard the entry point: without this, merely importing the module
    # (e.g. pytest collecting a file named test_*.py) would fire a live
    # LLM request at import time.
    asyncio.run(test())