Datasets:

License:
freddyaboulton HF Staff committed on
Commit
ae3f7e0
·
verified ·
1 Parent(s): 5cb6266

Create classify.py

Browse files
Files changed (1) hide show
  1. classify.py +590 -0
classify.py ADDED
@@ -0,0 +1,590 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.10"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "flashinfer-python",
6
+ # "huggingface-hub[hf_transfer]",
7
+ # "hf-xet>= 1.1.7",
8
+ # "torch",
9
+ # "transformers",
10
+ # "vllm>=0.8.5",
11
+ # ]
12
+ #
13
+ # ///
14
+ """
15
+ Generate responses for prompts in a dataset using vLLM for efficient GPU inference.
16
+
17
+ This script loads a dataset from Hugging Face Hub containing chat-formatted messages,
18
+ applies the model's chat template, generates responses using vLLM, and saves the
19
+ results back to the Hub with a comprehensive dataset card.
20
+
21
+ Example usage:
22
+ # Local execution with auto GPU detection
23
+ uv run generate-responses.py \\
24
+ username/input-dataset \\
25
+ username/output-dataset \\
26
+ --messages-column messages
27
+
28
+ # With custom model and sampling parameters
29
+ uv run generate-responses.py \\
30
+ username/input-dataset \\
31
+ username/output-dataset \\
32
+ --model-id meta-llama/Llama-3.1-8B-Instruct \\
33
+ --temperature 0.9 \\
34
+ --top-p 0.95 \\
35
+ --max-tokens 2048
36
+
37
+ # HF Jobs execution (see script output for full command)
38
+ hf jobs uv run --flavor a100x4 ...
39
+ """
40
+
41
+ import argparse
42
+ import logging
43
+ import os
44
+ import sys
45
+ from datetime import datetime
46
+ from typing import Optional
47
+
48
+ from datasets import load_dataset
49
+ from huggingface_hub import DatasetCard, get_token, login
50
+ from torch import cuda
51
+ from tqdm.auto import tqdm
52
+ from transformers import AutoTokenizer
53
+ from vllm import LLM, SamplingParams
54
+ from vllm.sampling_params import GuidedDecodingParams
55
+
56
# Enable HF Transfer for faster downloads
# NOTE(review): huggingface_hub reads HF_HUB_ENABLE_HF_TRANSFER at import time
# in some versions; since huggingface_hub/datasets are imported above, this may
# have no effect — confirm, or move before the imports / set in the environment.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

# Timestamped INFO-level logging; all progress messages below go through `logger`.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
63
+
64
+
65
def check_gpu_availability() -> int:
    """Verify that CUDA is usable and return the number of visible GPUs.

    Logs the name and total memory of each device. Exits the process with
    status 1 when no CUDA device is present, since generation cannot run
    on CPU.
    """
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
        )
        sys.exit(1)

    device_count = cuda.device_count()
    for device_index in range(device_count):
        device_name = cuda.get_device_name(device_index)
        # total_memory is reported in bytes; convert to GiB for readability.
        memory_gb = cuda.get_device_properties(device_index).total_memory / 1024**3
        logger.info(f"GPU {device_index}: {device_name} with {memory_gb:.1f} GB memory")

    return device_count
81
+
82
+
83
def create_dataset_card(
    source_dataset: str,
    model_id: str,
    messages_column: str,
    prompt_column: Optional[str],
    sampling_params: SamplingParams,
    tensor_parallel_size: int,
    num_examples: int,
    generation_time: str,
    num_skipped: int = 0,
    max_model_len_used: Optional[int] = None,
) -> str:
    """Create a comprehensive dataset card documenting the generation process.

    Args:
        source_dataset: Hub id of the input dataset (used for links in the card).
        model_id: Hub id of the model used for generation.
        messages_column: Column holding chat-formatted messages.
        prompt_column: Column holding plain-text prompts; when set it takes
            precedence over messages_column in the card's "Input Column" line
            and in the reproduction command.
        sampling_params: vLLM sampling parameters; individual fields are
            embedded verbatim in the card.
        tensor_parallel_size: Number of GPUs used for tensor parallelism.
        num_examples: Total number of examples in the source dataset.
        generation_time: ISO-format timestamp of when generation started.
        num_skipped: Number of prompts skipped for exceeding the context length;
            a filtering-statistics section is rendered only when > 0.
        max_model_len_used: Effective max model length used for filtering.

    Returns:
        Markdown text (with YAML front matter) for the dataset card.
    """
    # Optional section, rendered only when some prompts were filtered out.
    filtering_section = ""
    if num_skipped > 0:
        skip_percentage = (num_skipped / num_examples) * 100
        processed = num_examples - num_skipped
        # NOTE(review): assumes max_model_len_used is not None whenever
        # num_skipped > 0 — formatting None with "{:,}" below would raise.
        # Confirm at call sites.
        filtering_section = f"""

### Filtering Statistics

- **Total Examples**: {num_examples:,}
- **Processed**: {processed:,} ({100 - skip_percentage:.1f}%)
- **Skipped (too long)**: {num_skipped:,} ({skip_percentage:.1f}%)
- **Max Model Length Used**: {max_model_len_used:,} tokens

Note: Prompts exceeding the maximum model length were skipped and have empty responses."""

    # The leading "---" block is YAML front matter consumed by the Hub.
    return f"""---
tags:
- generated
- vllm
- uv-script
---

# Generated Responses Dataset

This dataset contains generated responses for prompts from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}).

## Generation Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Input Column**: `{prompt_column if prompt_column else messages_column}` ({"plain text prompts" if prompt_column else "chat messages"})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}{filtering_section}

### Sampling Parameters

- **Temperature**: {sampling_params.temperature}
- **Top P**: {sampling_params.top_p}
- **Top K**: {sampling_params.top_k}
- **Min P**: {sampling_params.min_p}
- **Max Tokens**: {sampling_params.max_tokens}
- **Repetition Penalty**: {sampling_params.repetition_penalty}

### Hardware Configuration

- **Tensor Parallel Size**: {tensor_parallel_size}
- **GPU Configuration**: {tensor_parallel_size} GPU(s)

## Dataset Structure

The dataset contains all columns from the source dataset plus:
- `response`: The generated response from the model

## Generation Script

Generated using the vLLM inference script from [uv-scripts/vllm](https://huggingface.co/datasets/uv-scripts/vllm).

To reproduce this generation:

```bash
uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
    {source_dataset} \\
    <output-dataset> \\
    --model-id {model_id} \\
    {"--prompt-column " + prompt_column if prompt_column else "--messages-column " + messages_column} \\
    --temperature {sampling_params.temperature} \\
    --top-p {sampling_params.top_p} \\
    --top-k {sampling_params.top_k} \\
    --max-tokens {sampling_params.max_tokens}{f" \\\\\\n    --max-model-len {max_model_len_used}" if max_model_len_used else ""}
```
"""
167
+
168
+
169
def main(
    src_dataset_hub_id: str,
    output_dataset_hub_id: str,
    model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
    messages_column: str = "messages",
    prompt_column: Optional[str] = None,
    output_column: str = "response",
    temperature: float = 0.7,
    top_p: float = 0.8,
    top_k: int = 20,
    min_p: float = 0.0,
    max_tokens: int = 16384,
    repetition_penalty: float = 1.0,
    gpu_memory_utilization: float = 0.90,
    max_model_len: Optional[int] = None,
    tensor_parallel_size: Optional[int] = None,
    skip_long_prompts: bool = True,
    max_samples: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline.

    Loads the input dataset, applies the model's chat template, generates
    guided-decoding-constrained responses with vLLM, and pushes the augmented
    dataset plus a dataset card back to the Hub.

    Args:
        src_dataset_hub_id: Input dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        model_id: Hugging Face model ID for generation
        messages_column: Column name containing chat messages
        prompt_column: Column name containing plain text prompts (alternative to messages_column)
        output_column: Column name for generated responses
        temperature: Sampling temperature
        top_p: Top-p sampling parameter
        top_k: Top-k sampling parameter
        min_p: Minimum probability threshold
        max_tokens: Maximum tokens to generate
        repetition_penalty: Repetition penalty parameter
        gpu_memory_utilization: GPU memory utilization factor
        max_model_len: Maximum model context length (None uses model default)
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        skip_long_prompts: Skip prompts exceeding max_model_len instead of failing
        max_samples: Maximum number of samples to process (None for all)
        hf_token: Hugging Face authentication token
    """
    generation_start_time = datetime.now().isoformat()

    # GPU check and configuration
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(
            f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
        )
    else:
        logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
        if tensor_parallel_size > num_gpus:
            logger.warning(
                f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
            )

    # Authentication - try multiple methods (explicit arg > env var > cached login)
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Initialize vLLM
    logger.info(f"Loading model: {model_id}")
    vllm_kwargs = {
        "model": model_id,
        "tensor_parallel_size": tensor_parallel_size,
        "gpu_memory_utilization": gpu_memory_utilization,
    }
    if max_model_len is not None:
        vllm_kwargs["max_model_len"] = max_model_len
        logger.info(f"Using max_model_len={max_model_len}")

    llm = LLM(**vllm_kwargs)

    # Load tokenizer for chat template
    logger.info("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # Guided decoding constrains every response to one of these labels,
    # effectively turning this script into a classifier.
    # NOTE(review): the label set is hard-coded; consider parameterizing it
    # if this script is reused for a different taxonomy.
    guided_decoding_params = GuidedDecodingParams(
        choice=[
            "safety",
            "density",
            "traffic/parking",
            "home values",
            "classism",
            "affordability",
            "racial resentment",
            "not needed",
            "unclear/other",
            "BLANK",
        ]
    )

    # Create sampling parameters
    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
        guided_decoding=guided_decoding_params,
    )

    # Load dataset
    logger.info(f"Loading dataset: {src_dataset_hub_id}")
    dataset = load_dataset(src_dataset_hub_id, split="train")

    # Apply max_samples if specified
    if max_samples is not None and max_samples < len(dataset):
        logger.info(f"Limiting dataset to {max_samples} samples")
        dataset = dataset.select(range(max_samples))

    total_examples = len(dataset)
    logger.info(f"Dataset loaded with {total_examples:,} examples")

    # Determine which column to use and validate
    if prompt_column:
        # Use prompt column mode
        if prompt_column not in dataset.column_names:
            logger.error(
                f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using prompt column mode with column: '{prompt_column}'")
        use_messages = False
    else:
        # Use messages column mode
        if messages_column not in dataset.column_names:
            logger.error(
                f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using messages column mode with column: '{messages_column}'")
        use_messages = True

    # Get effective max length for filtering
    if max_model_len is not None:
        effective_max_len = max_model_len
    else:
        # Get model's default max length
        effective_max_len = llm.llm_engine.model_config.max_model_len
    logger.info(f"Using effective max model length: {effective_max_len}")

    # Process messages and apply chat template
    logger.info("Preparing prompts...")
    valid_prompts = []
    valid_indices = []
    skipped_info = []

    for i, example in enumerate(tqdm(dataset, desc="Processing prompts")):
        if use_messages:
            # Messages mode: use existing chat messages
            messages = example[messages_column]
        else:
            # Prompt mode: convert plain text to messages format
            messages = [{"role": "user", "content": example[prompt_column]}]
        # Apply chat template (shared by both input modes)
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        # Count tokens if filtering is enabled; otherwise accept everything
        if skip_long_prompts:
            tokens = tokenizer.encode(prompt)
            if len(tokens) <= effective_max_len:
                valid_prompts.append(prompt)
                valid_indices.append(i)
            else:
                skipped_info.append((i, len(tokens)))
        else:
            valid_prompts.append(prompt)
            valid_indices.append(i)

    # Log filtering results
    if skip_long_prompts and skipped_info:
        logger.warning(
            f"Skipped {len(skipped_info)} prompts that exceed max_model_len ({effective_max_len} tokens)"
        )
        logger.info("Skipped prompt details (first 10):")
        for prompt_idx, token_count in skipped_info[:10]:
            logger.info(
                f"  - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})"
            )
        if len(skipped_info) > 10:
            logger.info(f"  ... and {len(skipped_info) - 10} more")

        skip_percentage = (len(skipped_info) / total_examples) * 100
        if skip_percentage > 10:
            logger.warning(f"WARNING: {skip_percentage:.1f}% of prompts were skipped!")

    if not valid_prompts:
        logger.error("No valid prompts to process after filtering!")
        sys.exit(1)

    # Generate responses - vLLM handles batching internally
    logger.info(f"Starting generation for {len(valid_prompts):,} valid prompts...")
    logger.info("vLLM will handle batching and scheduling automatically")

    outputs = llm.generate(valid_prompts, sampling_params)

    # Extract generated text and create full response list
    logger.info("Extracting generated responses...")
    responses = [""] * total_examples  # Skipped examples keep an empty response

    # outputs is ordered like valid_prompts, so pair each output with its
    # original dataset index.
    for original_idx, output in zip(valid_indices, outputs):
        responses[original_idx] = output.outputs[0].text.strip()

    # Add responses to dataset
    logger.info("Adding responses to dataset...")
    dataset = dataset.add_column(output_column, responses)

    # Create dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=src_dataset_hub_id,
        model_id=model_id,
        messages_column=messages_column,
        prompt_column=prompt_column,
        sampling_params=sampling_params,
        tensor_parallel_size=tensor_parallel_size,
        num_examples=total_examples,
        generation_time=generation_start_time,
        num_skipped=len(skipped_info) if skip_long_prompts else 0,
        max_model_len_used=effective_max_len if skip_long_prompts else None,
    )

    # Push dataset to hub
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    # Push dataset card
    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )
415
+
416
+
417
+ if __name__ == "__main__":
418
+ if len(sys.argv) > 1:
419
+ parser = argparse.ArgumentParser(
420
+ description="Generate responses for dataset prompts using vLLM",
421
+ formatter_class=argparse.RawDescriptionHelpFormatter,
422
+ epilog="""
423
+ Examples:
424
+ # Basic usage with default Qwen model
425
+ uv run generate-responses.py input-dataset output-dataset
426
+
427
+ # With custom model and parameters
428
+ uv run generate-responses.py input-dataset output-dataset \\
429
+ --model-id meta-llama/Llama-3.1-8B-Instruct \\
430
+ --temperature 0.9 \\
431
+ --max-tokens 2048
432
+
433
+ # Force specific GPU configuration
434
+ uv run generate-responses.py input-dataset output-dataset \\
435
+ --tensor-parallel-size 2 \\
436
+ --gpu-memory-utilization 0.95
437
+
438
+ # Using environment variable for token
439
+ HF_TOKEN=hf_xxx uv run generate-responses.py input-dataset output-dataset
440
+ """,
441
+ )
442
+
443
+ parser.add_argument(
444
+ "src_dataset_hub_id",
445
+ help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
446
+ )
447
+ parser.add_argument(
448
+ "output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
449
+ )
450
+ parser.add_argument(
451
+ "--model-id",
452
+ type=str,
453
+ default="Qwen/Qwen3-30B-A3B-Instruct-2507",
454
+ help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507)",
455
+ )
456
+ parser.add_argument(
457
+ "--messages-column",
458
+ type=str,
459
+ default="messages",
460
+ help="Column containing chat messages (default: messages)",
461
+ )
462
+ parser.add_argument(
463
+ "--prompt-column",
464
+ type=str,
465
+ help="Column containing plain text prompts (alternative to --messages-column)",
466
+ )
467
+ parser.add_argument(
468
+ "--output-column",
469
+ type=str,
470
+ default="response",
471
+ help="Column name for generated responses (default: response)",
472
+ )
473
+ parser.add_argument(
474
+ "--max-samples",
475
+ type=int,
476
+ help="Maximum number of samples to process (default: all)",
477
+ )
478
+ parser.add_argument(
479
+ "--temperature",
480
+ type=float,
481
+ default=0.7,
482
+ help="Sampling temperature (default: 0.7)",
483
+ )
484
+ parser.add_argument(
485
+ "--top-p",
486
+ type=float,
487
+ default=0.8,
488
+ help="Top-p sampling parameter (default: 0.8)",
489
+ )
490
+ parser.add_argument(
491
+ "--top-k",
492
+ type=int,
493
+ default=20,
494
+ help="Top-k sampling parameter (default: 20)",
495
+ )
496
+ parser.add_argument(
497
+ "--min-p",
498
+ type=float,
499
+ default=0.0,
500
+ help="Minimum probability threshold (default: 0.0)",
501
+ )
502
+ parser.add_argument(
503
+ "--max-tokens",
504
+ type=int,
505
+ default=16384,
506
+ help="Maximum tokens to generate (default: 16384)",
507
+ )
508
+ parser.add_argument(
509
+ "--repetition-penalty",
510
+ type=float,
511
+ default=1.0,
512
+ help="Repetition penalty (default: 1.0)",
513
+ )
514
+ parser.add_argument(
515
+ "--gpu-memory-utilization",
516
+ type=float,
517
+ default=0.90,
518
+ help="GPU memory utilization factor (default: 0.90)",
519
+ )
520
+ parser.add_argument(
521
+ "--max-model-len",
522
+ type=int,
523
+ help="Maximum model context length (default: model's default)",
524
+ )
525
+ parser.add_argument(
526
+ "--tensor-parallel-size",
527
+ type=int,
528
+ help="Number of GPUs to use (default: auto-detect)",
529
+ )
530
+ parser.add_argument(
531
+ "--hf-token",
532
+ type=str,
533
+ help="Hugging Face token (can also use HF_TOKEN env var)",
534
+ )
535
+ parser.add_argument(
536
+ "--skip-long-prompts",
537
+ action="store_true",
538
+ default=True,
539
+ help="Skip prompts that exceed max_model_len instead of failing (default: True)",
540
+ )
541
+ parser.add_argument(
542
+ "--no-skip-long-prompts",
543
+ dest="skip_long_prompts",
544
+ action="store_false",
545
+ help="Fail on prompts that exceed max_model_len",
546
+ )
547
+
548
+ args = parser.parse_args()
549
+
550
+ main(
551
+ src_dataset_hub_id=args.src_dataset_hub_id,
552
+ output_dataset_hub_id=args.output_dataset_hub_id,
553
+ model_id=args.model_id,
554
+ messages_column=args.messages_column,
555
+ prompt_column=args.prompt_column,
556
+ output_column=args.output_column,
557
+ temperature=args.temperature,
558
+ top_p=args.top_p,
559
+ top_k=args.top_k,
560
+ min_p=args.min_p,
561
+ max_tokens=args.max_tokens,
562
+ repetition_penalty=args.repetition_penalty,
563
+ gpu_memory_utilization=args.gpu_memory_utilization,
564
+ max_model_len=args.max_model_len,
565
+ tensor_parallel_size=args.tensor_parallel_size,
566
+ skip_long_prompts=args.skip_long_prompts,
567
+ max_samples=args.max_samples,
568
+ hf_token=args.hf_token,
569
+ )
570
+ else:
571
+ # Show HF Jobs example when run without arguments
572
+ print("""
573
+ vLLM Response Generation Script
574
+ ==============================
575
+
576
+ This script requires arguments. For usage information:
577
+ uv run generate-responses.py --help
578
+
579
+ Example HF Jobs command with multi-GPU:
580
+ # If you're logged in with huggingface-cli, token will be auto-detected
581
+ hf jobs uv run \\
582
+ --flavor l4x4 \\
583
+ https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
584
+ username/input-dataset \\
585
+ username/output-dataset \\
586
+ --messages-column messages \\
587
+ --model-id Qwen/Qwen3-30B-A3B-Instruct-2507 \\
588
+ --temperature 0.7 \\
589
+ --max-tokens 16384
590
+ """)