eitans-pu committed on
Commit
7e18424
·
verified ·
1 Parent(s): a6f8504

Upload generate-responses.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. generate-responses.py +581 -0
generate-responses.py ADDED
@@ -0,0 +1,581 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "flashinfer-python",
#     "huggingface-hub[hf_transfer]",
#     "hf-xet>= 1.1.7",
#     "torch",
#     "transformers",
#     "vllm>=0.8.5",
# ]
#
# ///
"""
Generate responses for prompts in a dataset using vLLM for efficient GPU inference.

This script loads a dataset from Hugging Face Hub containing chat-formatted messages,
applies the model's chat template, generates responses using vLLM, and saves the
results back to the Hub with a comprehensive dataset card.

Example usage:
    # Local execution with auto GPU detection
    uv run generate-responses.py \\
        username/input-dataset \\
        username/output-dataset \\
        --messages-column messages

    # With custom model and sampling parameters
    uv run generate-responses.py \\
        username/input-dataset \\
        username/output-dataset \\
        --model-id meta-llama/Llama-3.1-8B-Instruct \\
        --temperature 0.9 \\
        --top-p 0.95 \\
        --max-tokens 2048

    # HF Jobs execution (see script output for full command)
    hf jobs uv run --flavor a100x4 ...
"""

import argparse
import logging
import os
import sys
from datetime import datetime
from typing import Optional

# Enable HF Transfer for faster downloads.
# NOTE: this must be set BEFORE huggingface_hub is imported — the library reads
# HF_HUB_ENABLE_HF_TRANSFER into a module-level constant at import time, so
# assigning it after the import has no effect.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from datasets import load_dataset
from huggingface_hub import DatasetCard, get_token, login
from torch import cuda
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
def check_gpu_availability() -> int:
    """Abort unless CUDA is usable; otherwise log every visible GPU and return the count."""
    if not cuda.is_available():
        # Hard requirement: vLLM generation cannot run on CPU in this script.
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
        )
        sys.exit(1)

    device_count = cuda.device_count()
    for device_index in range(device_count):
        name = cuda.get_device_name(device_index)
        # total_memory is reported in bytes; convert to GiB for readability.
        total_gib = cuda.get_device_properties(device_index).total_memory / 1024**3
        logger.info(f"GPU {device_index}: {name} with {total_gib:.1f} GB memory")

    return device_count
def create_dataset_card(
    source_dataset: str,
    model_id: str,
    messages_column: str,
    prompt_column: Optional[str],
    sampling_params: SamplingParams,
    tensor_parallel_size: int,
    num_examples: int,
    generation_time: str,
    num_skipped: int = 0,
    max_model_len_used: Optional[int] = None,
) -> str:
    """Create a comprehensive dataset card documenting the generation process.

    Args:
        source_dataset: Hub ID of the input dataset.
        model_id: Hub ID of the model used for generation.
        messages_column: Column holding chat-formatted messages.
        prompt_column: Column holding plain-text prompts; when set, the card
            documents prompt-column mode instead of messages-column mode.
        sampling_params: vLLM sampling parameters used for generation.
        tensor_parallel_size: Number of GPUs used for tensor parallelism.
        num_examples: Total number of examples in the source dataset.
        generation_time: ISO timestamp of when generation started.
        num_skipped: How many prompts were skipped for exceeding the context
            length (adds a "Filtering Statistics" section when > 0).
        max_model_len_used: Effective max model length used for filtering;
            only rendered when filtering occurred or a length was forced.

    Returns:
        The complete dataset card as a markdown string with YAML front matter.
    """
    filtering_section = ""
    if num_skipped > 0:
        # Percentages are relative to the full dataset, including skipped rows.
        skip_percentage = (num_skipped / num_examples) * 100
        processed = num_examples - num_skipped
        filtering_section = f"""
### Filtering Statistics
- **Total Examples**: {num_examples:,}
- **Processed**: {processed:,} ({100 - skip_percentage:.1f}%)
- **Skipped (too long)**: {num_skipped:,} ({skip_percentage:.1f}%)
- **Max Model Length Used**: {max_model_len_used:,} tokens
Note: Prompts exceeding the maximum model length were skipped and have empty responses."""

    # Build the optional max-model-len line separately to avoid backslashes in f-string expressions
    if max_model_len_used:
        extra_len_line = f" \\\n    --max-model-len {max_model_len_used}"
    else:
        extra_len_line = ""

    input_col = prompt_column if prompt_column else messages_column
    input_desc = "plain text prompts" if prompt_column else "chat messages"

    return f"""---
tags:
- generated
- vllm
- uv-script
---
# Generated Responses Dataset
This dataset contains generated responses for prompts from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}).

## Generation Details
- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Input Column**: `{input_col}` ({input_desc})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}{filtering_section}

### Sampling Parameters
- **Temperature**: {sampling_params.temperature}
- **Top P**: {sampling_params.top_p}
- **Top K**: {sampling_params.top_k}
- **Min P**: {sampling_params.min_p}
- **Max Tokens**: {sampling_params.max_tokens}
- **Repetition Penalty**: {sampling_params.repetition_penalty}

### Hardware Configuration
- **Tensor Parallel Size**: {tensor_parallel_size}
- **GPU Configuration**: {tensor_parallel_size} GPU(s)

## Dataset Structure
The dataset contains all columns from the source dataset plus:
- `response`: The generated response from the model

## Generation Script
Generated using the vLLM inference script from [uv-scripts/vllm](https://huggingface.co/datasets/uv-scripts/vllm).

To reproduce this generation:
```bash
uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
    {source_dataset} \\
    <output-dataset> \\
    --model-id {model_id} \\
    {"--prompt-column " + prompt_column if prompt_column else "--messages-column " + messages_column} \\
    --temperature {sampling_params.temperature} \\
    --top-p {sampling_params.top_p} \\
    --top-k {sampling_params.top_k} \\
    --max-tokens {sampling_params.max_tokens}{extra_len_line}
```
"""
def main(
    src_dataset_hub_id: str,
    output_dataset_hub_id: str,
    model_id: str = "Qwen/Qwen3-235B-A22B-Instruct-2507",
    messages_column: str = "messages",
    prompt_column: Optional[str] = None,
    output_column: str = "response",
    temperature: float = 0.0,
    top_p: float = 1.0,
    top_k: int = -1,
    min_p: float = 0.0,
    max_tokens: int = 2048,
    repetition_penalty: float = 1.0,
    gpu_memory_utilization: float = 0.90,
    max_model_len: Optional[int] = None,
    tensor_parallel_size: Optional[int] = None,
    skip_long_prompts: bool = True,
    max_samples: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline.

    Args:
        src_dataset_hub_id: Input dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        model_id: Hugging Face model ID for generation
        messages_column: Column name containing chat messages
        prompt_column: Column name containing plain text prompts (alternative to messages_column)
        output_column: Column name for generated responses
        temperature: Sampling temperature
        top_p: Top-p sampling parameter
        top_k: Top-k sampling parameter
        min_p: Minimum probability threshold
        max_tokens: Maximum tokens to generate
        repetition_penalty: Repetition penalty parameter
        gpu_memory_utilization: GPU memory utilization factor
        max_model_len: Maximum model context length (None uses model default)
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        skip_long_prompts: Skip prompts exceeding max_model_len instead of failing
        max_samples: Maximum number of samples to process (None for all)
        hf_token: Hugging Face authentication token
    """
    generation_start_time = datetime.now().isoformat()

    # GPU check and configuration
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(
            f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
        )
    else:
        logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
        if tensor_parallel_size > num_gpus:
            logger.warning(
                f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
            )

    # Authentication - try explicit arg, then env var, then cached login
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Initialize vLLM
    logger.info(f"Loading model: {model_id}")
    vllm_kwargs = {
        "model": model_id,
        "tensor_parallel_size": tensor_parallel_size,
        "gpu_memory_utilization": gpu_memory_utilization,
        "enable_prefix_caching": True,
    }
    if max_model_len is not None:
        vllm_kwargs["max_model_len"] = max_model_len
        logger.info(f"Using max_model_len={max_model_len}")

    llm = LLM(**vllm_kwargs)
    logger.info("Prefix caching enabled (system prompt shared across all cases)")

    # Load tokenizer separately: vLLM does not expose chat-template rendering here
    logger.info("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Create sampling parameters
    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
        # no guided_decoding here
    )

    # Load dataset
    logger.info(f"Loading dataset: {src_dataset_hub_id}")
    dataset = load_dataset(src_dataset_hub_id, split="train")

    # Apply max_samples if specified
    if max_samples is not None and max_samples < len(dataset):
        logger.info(f"Limiting dataset to {max_samples} samples")
        dataset = dataset.select(range(max_samples))

    total_examples = len(dataset)
    logger.info(f"Dataset loaded with {total_examples:,} examples")

    # Determine which column to use and validate it exists
    if prompt_column:
        if prompt_column not in dataset.column_names:
            logger.error(
                f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using prompt column mode with column: '{prompt_column}'")
        use_messages = False
    else:
        if messages_column not in dataset.column_names:
            logger.error(
                f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using messages column mode with column: '{messages_column}'")
        use_messages = True

    # Effective context length used for prompt-length filtering
    if max_model_len is not None:
        effective_max_len = max_model_len
    else:
        # Fall back to the model's own default context length
        effective_max_len = llm.llm_engine.model_config.max_model_len
    logger.info(f"Using effective max model length: {effective_max_len}")

    # Render chat templates and (optionally) filter over-long prompts.
    # valid_indices maps each generated output back to its original dataset row.
    logger.info("Preparing prompts...")
    valid_prompts = []
    valid_indices = []
    skipped_info = []  # (original_index, token_count) for prompts that were too long

    for i, example in enumerate(tqdm(dataset, desc="Processing prompts")):
        if use_messages:
            # Messages mode: use the existing chat messages as-is
            messages = example[messages_column]
        else:
            # Prompt mode: wrap the plain text as a single user turn
            messages = [{"role": "user", "content": example[prompt_column]}]
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        if skip_long_prompts:
            tokens = tokenizer.encode(prompt)
            if len(tokens) <= effective_max_len:
                valid_prompts.append(prompt)
                valid_indices.append(i)
            else:
                skipped_info.append((i, len(tokens)))
        else:
            valid_prompts.append(prompt)
            valid_indices.append(i)

    # Log filtering results
    if skip_long_prompts and skipped_info:
        logger.warning(
            f"Skipped {len(skipped_info)} prompts that exceed max_model_len ({effective_max_len} tokens)"
        )
        logger.info("Skipped prompt details (first 10):")
        for prompt_idx, token_count in skipped_info[:10]:
            logger.info(
                f"  - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})"
            )
        if len(skipped_info) > 10:
            logger.info(f"  ... and {len(skipped_info) - 10} more")

        skip_percentage = (len(skipped_info) / total_examples) * 100
        if skip_percentage > 10:
            logger.warning(f"WARNING: {skip_percentage:.1f}% of prompts were skipped!")

    if not valid_prompts:
        logger.error("No valid prompts to process after filtering!")
        sys.exit(1)

    # Generate responses - vLLM handles batching internally
    logger.info(f"Starting generation for {len(valid_prompts):,} valid prompts...")
    logger.info("vLLM will handle batching and scheduling automatically")

    outputs = llm.generate(valid_prompts, sampling_params)

    # Scatter generated text back to original row positions; skipped rows keep ""
    logger.info("Extracting generated responses...")
    responses = [""] * total_examples
    for original_idx, output in zip(valid_indices, outputs):
        responses[original_idx] = output.outputs[0].text.strip()

    # Add responses to dataset
    logger.info("Adding responses to dataset...")
    dataset = dataset.add_column(output_column, responses)

    # Create dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=src_dataset_hub_id,
        model_id=model_id,
        messages_column=messages_column,
        prompt_column=prompt_column,
        sampling_params=sampling_params,
        tensor_parallel_size=tensor_parallel_size,
        num_examples=total_examples,
        generation_time=generation_start_time,
        num_skipped=len(skipped_info) if skip_long_prompts else 0,
        max_model_len_used=effective_max_len if skip_long_prompts else None,
    )

    # Push dataset to hub
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    # Push dataset card
    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )
if __name__ == "__main__":
    if len(sys.argv) > 1:
        parser = argparse.ArgumentParser(
            description="Generate responses for dataset prompts using vLLM",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Basic usage with default Qwen model
  uv run generate-responses.py input-dataset output-dataset

  # With custom model and parameters
  uv run generate-responses.py input-dataset output-dataset \\
    --model-id meta-llama/Llama-3.1-8B-Instruct \\
    --temperature 0.9 \\
    --max-tokens 2048

  # Force specific GPU configuration
  uv run generate-responses.py input-dataset output-dataset \\
    --tensor-parallel-size 2 \\
    --gpu-memory-utilization 0.95

  # Using environment variable for token
  HF_TOKEN=hf_xxx uv run generate-responses.py input-dataset output-dataset
""",
        )

        # Required positional arguments: source and destination Hub dataset IDs
        parser.add_argument(
            "src_dataset_hub_id",
            help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
        )
        parser.add_argument(
            "output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
        )

        # Model and column selection
        parser.add_argument(
            "--model-id",
            type=str,
            default=os.environ.get("MODEL_ID", "Qwen/Qwen3-235B-A22B-Instruct-2507"),
            help="Model to use for generation (default: Qwen3-235B-A22B, or MODEL_ID env var)",
        )
        parser.add_argument(
            "--messages-column",
            type=str,
            default="messages",
            help="Column containing chat messages (default: messages)",
        )
        parser.add_argument(
            "--prompt-column",
            type=str,
            help="Column containing plain text prompts (alternative to --messages-column)",
        )
        parser.add_argument(
            "--output-column",
            type=str,
            default="response",
            help="Column name for generated responses (default: response)",
        )
        parser.add_argument(
            "--max-samples",
            type=int,
            help="Maximum number of samples to process (default: all)",
        )

        # Sampling parameters (passed through to vLLM SamplingParams)
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.0,
            help="Sampling temperature (default: 0.0 for deterministic output)",
        )
        parser.add_argument(
            "--top-p",
            type=float,
            default=1.0,
            help="Top-p sampling parameter (default: 1.0; unused when temperature=0)",
        )
        parser.add_argument(
            "--top-k",
            type=int,
            default=-1,
            help="Top-k sampling parameter (default: -1 disabled; unused when temperature=0)",
        )
        parser.add_argument(
            "--min-p",
            type=float,
            default=0.0,
            help="Minimum probability threshold (default: 0.0)",
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=2048,
            help="Maximum tokens to generate (default: 2048)",
        )
        parser.add_argument(
            "--repetition-penalty",
            type=float,
            default=1.0,
            help="Repetition penalty (default: 1.0)",
        )

        # Hardware / engine configuration
        parser.add_argument(
            "--gpu-memory-utilization",
            type=float,
            default=0.90,
            help="GPU memory utilization factor (default: 0.90)",
        )
        parser.add_argument(
            "--max-model-len",
            type=int,
            help="Maximum model context length (default: model's default)",
        )
        parser.add_argument(
            "--tensor-parallel-size",
            type=int,
            help="Number of GPUs to use (default: auto-detect)",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )

        # Paired flags implementing an on-by-default boolean:
        # --skip-long-prompts (default True) / --no-skip-long-prompts to disable
        parser.add_argument(
            "--skip-long-prompts",
            action="store_true",
            default=True,
            help="Skip prompts that exceed max_model_len instead of failing (default: True)",
        )
        parser.add_argument(
            "--no-skip-long-prompts",
            dest="skip_long_prompts",
            action="store_false",
            help="Fail on prompts that exceed max_model_len",
        )

        args = parser.parse_args()

        main(
            src_dataset_hub_id=args.src_dataset_hub_id,
            output_dataset_hub_id=args.output_dataset_hub_id,
            model_id=args.model_id,
            messages_column=args.messages_column,
            prompt_column=args.prompt_column,
            output_column=args.output_column,
            temperature=args.temperature,
            top_p=args.top_p,
            top_k=args.top_k,
            min_p=args.min_p,
            max_tokens=args.max_tokens,
            repetition_penalty=args.repetition_penalty,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            tensor_parallel_size=args.tensor_parallel_size,
            skip_long_prompts=args.skip_long_prompts,
            max_samples=args.max_samples,
            hf_token=args.hf_token,
        )
    else:
        # Show HF Jobs example when run without arguments
        print("""
vLLM Response Generation Script
==============================
This script requires arguments. For usage information:
  uv run generate-responses.py --help
Example HF Jobs command with multi-GPU:
  # If you're logged in with huggingface-cli, token will be auto-detected
  hf jobs uv run \\
    --flavor l4x4 \\
    https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
    username/input-dataset \\
    username/output-dataset \\
    --messages-column messages \\
    --model-id Qwen/Qwen3-30B-A3B-Instruct-2507 \\
    --temperature 0.0 \\
    --max-tokens 2048
""")