tytodd committed on
Commit
e8c24ba
·
verified ·
1 Parent(s): 2ed83f0

Upload generate_responses.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. generate_responses.py +549 -0
generate_responses.py ADDED
@@ -0,0 +1,549 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.10"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "flashinfer-python",
6
+ # "huggingface-hub[hf_transfer]",
7
#     "hf-xet>=1.1.7",
#     "torch",
#     "tqdm",
#     "transformers",
#     "vllm>=0.8.5",
# ]
12
+ #
13
+ # ///
14
+ """
15
+ Generate responses for prompts in a dataset using vLLM for efficient GPU inference.
16
+
17
+ This script loads a dataset from Hugging Face Hub containing chat-formatted messages,
18
+ applies the model's chat template, generates responses using vLLM, and saves the
19
+ results back to the Hub with a comprehensive dataset card.
20
+
21
+ Example usage:
22
+ # Local execution with auto GPU detection
23
+ uv run generate_responses.py \\
24
+ username/input-dataset \\
25
+ username/output-dataset \\
26
+ --messages-column messages
27
+
28
+ # With custom model and sampling parameters
29
+ uv run generate_responses.py \\
30
+ username/input-dataset \\
31
+ username/output-dataset \\
32
+ --model-id meta-llama/Llama-3.1-8B-Instruct \\
33
+ --temperature 0.9 \\
34
+ --top-p 0.95 \\
35
+ --max-tokens 2048
36
+
37
+ # HF Jobs execution (see script output for full command)
38
+ hf jobs uv run --flavor a100x4 ...
39
+ """
40
+
41
import argparse
import logging
import os
import sys
from datetime import datetime
from typing import Optional

# Enable HF Transfer for faster downloads.
# NOTE: huggingface_hub reads HF_HUB_ENABLE_HF_TRANSFER into a module-level
# constant at import time, so this MUST be set before importing
# huggingface_hub (or libraries such as `datasets` that import it);
# setting it afterwards has no effect.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from datasets import load_dataset  # noqa: E402
from huggingface_hub import DatasetCard, get_token, login  # noqa: E402
from torch import cuda  # noqa: E402
from tqdm.auto import tqdm  # noqa: E402
from vllm import LLM, SamplingParams  # noqa: E402

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Canonical location of this script on the Hub; used in the generated
# dataset card and in the usage examples printed when run without arguments.
UV_SCRIPT_REPO_ID = "modaic/batch-vllm"
UV_SCRIPT_FILENAME = "generate_responses.py"
UV_SCRIPT_URL = f"https://huggingface.co/datasets/{UV_SCRIPT_REPO_ID}/resolve/main/{UV_SCRIPT_FILENAME}"
63
+
64
+
65
def check_gpu_availability() -> int:
    """Abort with an error message unless CUDA is available; return the GPU count.

    Logs the name and total memory of every visible GPU so the run
    configuration is recorded before model loading starts.
    """
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor.")
        sys.exit(1)

    gpu_count = cuda.device_count()
    for idx in range(gpu_count):
        device_name = cuda.get_device_name(idx)
        total_gb = cuda.get_device_properties(idx).total_memory / 1024**3
        logger.info(f"GPU {idx}: {device_name} with {total_gb:.1f} GB memory")

    return gpu_count
79
+
80
+
81
def create_dataset_card(
    source_dataset: str,
    model_id: str,
    messages_column: str,
    prompt_column: Optional[str],
    sampling_params: SamplingParams,
    tensor_parallel_size: int,
    num_examples: int,
    generation_time: str,
    num_skipped: int = 0,
    max_model_len_used: Optional[int] = None,
) -> str:
    """Create a comprehensive dataset card documenting the generation process.

    Args:
        source_dataset: Hub ID of the input dataset.
        model_id: Hub ID of the model used for generation.
        messages_column: Column holding chat messages (used when prompt_column is None).
        prompt_column: Column holding plain-text prompts, or None for messages mode.
        sampling_params: The vLLM SamplingParams used; its attributes are
            rendered into the card so the run is reproducible.
        tensor_parallel_size: Number of GPUs the model was sharded across.
        num_examples: Total number of examples in the dataset.
        generation_time: ISO-format timestamp of when generation started.
        num_skipped: Number of prompts skipped for exceeding the context length
            (0 in chat mode, where no pre-filtering happens).
        max_model_len_used: Context-length cap passed to vLLM, if any.

    Returns:
        Markdown text (with YAML front matter) for the dataset card.
    """
    filtering_section = ""
    # Reconstruct the CLI flag that selects the input column, for the
    # "reproduce this generation" command in the card.
    input_column_flag = f"--prompt-column {prompt_column}" if prompt_column else f"--messages-column {messages_column}"
    # Extra continuation line for the reproduce command; empty when the
    # model's default context length was used.
    max_model_len_flag = f" \\\n    --max-model-len {max_model_len_used}" if max_model_len_used else ""

    # Only emit the filtering statistics section when something was skipped.
    if num_skipped > 0:
        skip_percentage = (num_skipped / num_examples) * 100
        processed = num_examples - num_skipped
        filtering_section = f"""

### Filtering Statistics

- **Total Examples**: {num_examples:,}
- **Processed**: {processed:,} ({100 - skip_percentage:.1f}%)
- **Skipped (too long)**: {num_skipped:,} ({skip_percentage:.1f}%)
- **Max Model Length Used**: {max_model_len_used:,} tokens

Note: Prompts exceeding the maximum model length were skipped and have empty responses."""

    return f"""---
tags:
- generated
- vllm
- uv-script
---

# Generated Responses Dataset

This dataset contains generated responses for prompts from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}).

## Generation Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Input Column**: `{prompt_column if prompt_column else messages_column}` ({"plain text prompts" if prompt_column else "chat messages"})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}{filtering_section}

### Sampling Parameters

- **Temperature**: {sampling_params.temperature}
- **Top P**: {sampling_params.top_p}
- **Top K**: {sampling_params.top_k}
- **Min P**: {sampling_params.min_p}
- **Max Tokens**: {sampling_params.max_tokens}
- **Repetition Penalty**: {sampling_params.repetition_penalty}

### Hardware Configuration

- **Tensor Parallel Size**: {tensor_parallel_size}
- **GPU Configuration**: {tensor_parallel_size} GPU(s)

## Dataset Structure

The dataset contains all columns from the source dataset plus:
- `response`: The generated response from the model

## Generation Script

Generated using the vLLM inference script from [{UV_SCRIPT_REPO_ID}](https://huggingface.co/datasets/{UV_SCRIPT_REPO_ID}).

To reproduce this generation:

```bash
uv run {UV_SCRIPT_URL} \\
    {source_dataset} \\
    <output-dataset> \\
    --model-id {model_id} \\
    {input_column_flag} \\
    --temperature {sampling_params.temperature} \\
    --top-p {sampling_params.top_p} \\
    --top-k {sampling_params.top_k} \\
    --max-tokens {sampling_params.max_tokens}{max_model_len_flag}
```
"""
168
+
169
+
170
def main(
    src_dataset_hub_id: str,
    output_dataset_hub_id: str,
    model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
    messages_column: str = "messages",
    prompt_column: Optional[str] = None,
    output_column: str = "response",
    temperature: float = 0.7,
    top_p: float = 0.8,
    top_k: int = 20,
    min_p: float = 0.0,
    max_tokens: int = 16384,
    repetition_penalty: float = 1.0,
    gpu_memory_utilization: float = 0.90,
    max_model_len: Optional[int] = None,
    tensor_parallel_size: Optional[int] = None,
    skip_long_prompts: bool = True,
    enable_thinking: bool = False,
    max_samples: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline.

    Pipeline order: GPU check -> Hub auth -> vLLM init -> dataset load ->
    prompt preparation -> batched chat generation -> push results + card.

    Args:
        src_dataset_hub_id: Input dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        model_id: Hugging Face model ID for generation
        messages_column: Column name containing chat messages
        prompt_column: Column name containing plain text prompts (alternative to messages_column)
        output_column: Column name for generated responses
        temperature: Sampling temperature
        top_p: Top-p sampling parameter
        top_k: Top-k sampling parameter
        min_p: Minimum probability threshold
        max_tokens: Maximum tokens to generate
        repetition_penalty: Repetition penalty parameter
        gpu_memory_utilization: GPU memory utilization factor
        max_model_len: Maximum model context length (None uses model default)
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        skip_long_prompts: Deprecated. Prompt pre-filtering is not used in chat mode.
        enable_thinking: Enable model thinking/reasoning when supported by the chat template
        max_samples: Maximum number of samples to process (None for all)
        hf_token: Hugging Face authentication token
    """
    # Captured up front so the dataset card records when the run started,
    # not when the (possibly hours-later) card is written.
    generation_start_time = datetime.now().isoformat()

    # GPU check and configuration
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}")
    else:
        logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
        if tensor_parallel_size > num_gpus:
            # Warn only; vLLM itself will fail if the request is impossible.
            logger.warning(f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available")

    # Authentication - try multiple methods, in order of explicitness:
    # explicit argument, environment variable, cached CLI login.
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Initialize vLLM. max_model_len is only passed when explicitly set so
    # the model's own default context length is used otherwise.
    logger.info(f"Loading model: {model_id}")
    vllm_kwargs = {
        "model": model_id,
        "tensor_parallel_size": tensor_parallel_size,
        "gpu_memory_utilization": gpu_memory_utilization,
    }
    if max_model_len is not None:
        vllm_kwargs["max_model_len"] = max_model_len
        logger.info(f"Using max_model_len={max_model_len}")

    llm = LLM(**vllm_kwargs)

    # Create sampling parameters
    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
    )

    # Load dataset (train split only)
    logger.info(f"Loading dataset: {src_dataset_hub_id}")
    dataset = load_dataset(src_dataset_hub_id, split="train")

    # Apply max_samples if specified
    if max_samples is not None and max_samples < len(dataset):
        logger.info(f"Limiting dataset to {max_samples} samples")
        dataset = dataset.select(range(max_samples))

    total_examples = len(dataset)
    logger.info(f"Dataset loaded with {total_examples:,} examples")

    # Determine which column to use and validate. prompt_column takes
    # precedence over messages_column when both are supplied.
    if prompt_column:
        # Use prompt column mode
        if prompt_column not in dataset.column_names:
            logger.error(f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}")
            sys.exit(1)
        logger.info(f"Using prompt column mode with column: '{prompt_column}'")
        use_messages = False
    else:
        # Use messages column mode
        if messages_column not in dataset.column_names:
            logger.error(f"Column '{messages_column}' not found. Available columns: {dataset.column_names}")
            sys.exit(1)
        logger.info(f"Using messages column mode with column: '{messages_column}'")
        use_messages = True

    # skip_long_prompts is kept for backward compatibility only; llm.chat()
    # enforces the model's context limit itself.
    if skip_long_prompts:
        logger.info(
            "Prompt length pre-filtering is disabled when using llm.chat(); model limits will be enforced at inference time"
        )

    logger.info("Preparing chat messages...")
    conversations = []

    for example in tqdm(dataset, desc="Processing prompts"):
        if use_messages:
            messages = example[messages_column]
        else:
            # Wrap a plain-text prompt as a single-turn user message.
            user_prompt = example[prompt_column]
            messages = [{"role": "user", "content": user_prompt}]

        conversations.append(messages)

    if not conversations:
        logger.error("No prompts to process!")
        sys.exit(1)

    # Generate responses - vLLM handles batching internally
    logger.info(f"Starting chat generation for {len(conversations):,} prompts...")
    logger.info("vLLM will handle batching and scheduling automatically")

    outputs = llm.chat(
        conversations,
        sampling_params=sampling_params,
        chat_template_kwargs={"enable_thinking": enable_thinking},
    )

    # Extract generated text and create full response list. Pre-filled with
    # empty strings so the new column always matches the dataset length.
    logger.info("Extracting generated responses...")
    responses = [""] * total_examples

    for idx, output in enumerate(outputs):
        # One sampled completion per prompt (SamplingParams n defaults to 1).
        response = output.outputs[0].text.strip()
        responses[idx] = response

    # Add responses to dataset
    logger.info("Adding responses to dataset...")
    dataset = dataset.add_column(output_column, responses)

    # Create dataset card documenting this run for reproducibility.
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=src_dataset_hub_id,
        model_id=model_id,
        messages_column=messages_column,
        prompt_column=prompt_column,
        sampling_params=sampling_params,
        tensor_parallel_size=tensor_parallel_size,
        num_examples=total_examples,
        generation_time=generation_start_time,
        num_skipped=0,
        max_model_len_used=max_model_len,
    )

    # Push dataset to hub
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    # Push dataset card (separate call: push_to_hub writes data, not the card)
    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}")
359
+
360
+
361
if __name__ == "__main__":
    # With arguments: parse CLI flags and run the pipeline.
    # Without arguments: print usage and HF Jobs examples instead of erroring.
    if len(sys.argv) > 1:
        parser = argparse.ArgumentParser(
            description="Generate responses for dataset prompts using vLLM",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            # NOTE: examples use the actual filename (generate_responses.py,
            # underscore) to match UV_SCRIPT_FILENAME and the module docstring.
            epilog="""
Examples:
  # Basic usage with default Qwen model
  uv run generate_responses.py input-dataset output-dataset

  # With custom model and parameters
  uv run generate_responses.py input-dataset output-dataset \\
    --model-id meta-llama/Llama-3.1-8B-Instruct \\
    --temperature 0.9 \\
    --max-tokens 2048

  # Force specific GPU configuration
  uv run generate_responses.py input-dataset output-dataset \\
    --tensor-parallel-size 2 \\
    --gpu-memory-utilization 0.95

  # Using environment variable for token
  HF_TOKEN=hf_xxx uv run generate_responses.py input-dataset output-dataset
""",
        )

        parser.add_argument(
            "src_dataset_hub_id",
            help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
        )
        parser.add_argument("output_dataset_hub_id", help="Output dataset name on Hugging Face Hub")
        parser.add_argument(
            "--model-id",
            type=str,
            default="Qwen/Qwen3-30B-A3B-Instruct-2507",
            help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507)",
        )
        parser.add_argument(
            "--messages-column",
            type=str,
            default="messages",
            help="Column containing chat messages (default: messages)",
        )
        parser.add_argument(
            "--prompt-column",
            type=str,
            help="Column containing plain text prompts (alternative to --messages-column)",
        )
        parser.add_argument(
            "--output-column",
            type=str,
            default="response",
            help="Column name for generated responses (default: response)",
        )
        parser.add_argument(
            "--max-samples",
            type=int,
            help="Maximum number of samples to process (default: all)",
        )
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.7,
            help="Sampling temperature (default: 0.7)",
        )
        parser.add_argument(
            "--top-p",
            type=float,
            default=0.8,
            help="Top-p sampling parameter (default: 0.8)",
        )
        parser.add_argument(
            "--top-k",
            type=int,
            default=20,
            help="Top-k sampling parameter (default: 20)",
        )
        parser.add_argument(
            "--min-p",
            type=float,
            default=0.0,
            help="Minimum probability threshold (default: 0.0)",
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=16384,
            help="Maximum tokens to generate (default: 16384)",
        )
        parser.add_argument(
            "--repetition-penalty",
            type=float,
            default=1.0,
            help="Repetition penalty (default: 1.0)",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            type=float,
            default=0.90,
            help="GPU memory utilization factor (default: 0.90)",
        )
        parser.add_argument(
            "--max-model-len",
            type=int,
            help="Maximum model context length (default: model's default)",
        )
        parser.add_argument(
            "--tensor-parallel-size",
            type=int,
            help="Number of GPUs to use (default: auto-detect)",
        )
        parser.add_argument(
            "--enable-thinking",
            action="store_true",
            default=False,
            help="Enable model thinking/reasoning when supported (default: False)",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )
        # --skip-long-prompts / --no-skip-long-prompts are retained for
        # backward compatibility; the flag is a no-op in chat mode (see main()).
        parser.add_argument(
            "--skip-long-prompts",
            action="store_true",
            default=True,
            help="Skip prompts that exceed max_model_len instead of failing (default: True)",
        )
        parser.add_argument(
            "--no-skip-long-prompts",
            dest="skip_long_prompts",
            action="store_false",
            help="Fail on prompts that exceed max_model_len",
        )

        args = parser.parse_args()

        main(
            src_dataset_hub_id=args.src_dataset_hub_id,
            output_dataset_hub_id=args.output_dataset_hub_id,
            model_id=args.model_id,
            messages_column=args.messages_column,
            prompt_column=args.prompt_column,
            output_column=args.output_column,
            temperature=args.temperature,
            top_p=args.top_p,
            top_k=args.top_k,
            min_p=args.min_p,
            max_tokens=args.max_tokens,
            repetition_penalty=args.repetition_penalty,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            tensor_parallel_size=args.tensor_parallel_size,
            skip_long_prompts=args.skip_long_prompts,
            enable_thinking=args.enable_thinking,
            max_samples=args.max_samples,
            hf_token=args.hf_token,
        )
    else:
        # Show HF Jobs example when run without arguments
        print(f"""
vLLM Response Generation Script
==============================

This script requires arguments. For usage information:
  uv run generate_responses.py --help

Upload this script to the Hub:
  hf upload --repo-type dataset \\
    modaic/batch-vllm \\
    src/modaic-sdk/modaic/batch/generate_responses.py \\
    generate_responses.py

Canonical script URL:
  {UV_SCRIPT_URL}

Example HF Jobs command with multi-GPU:
  # If you're logged in with huggingface-cli, token will be auto-detected
  hf jobs uv run \\
    --flavor l4x4 \\
    --secrets HF_TOKEN \\
    {UV_SCRIPT_URL} \\
    username/input-dataset \\
    username/output-dataset \\
    --messages-column messages \\
    --model-id Qwen/Qwen3-30B-A3B-Instruct-2507 \\
    --temperature 0.7 \\
    --max-tokens 16384
""")