gnovoa26 committed on
Commit
7e3b4aa
·
verified ·
1 Parent(s): 2817e39

Upload ihclassify2.py

Browse files
Files changed (1) hide show
  1. ihclassify2.py +589 -0
ihclassify2.py ADDED
@@ -0,0 +1,589 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.10"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "flashinfer-python",
6
+ # "huggingface-hub[hf_transfer]",
7
+ # "hf-xet>= 1.1.7",
8
+ # "torch",
9
+ # "transformers",
10
+ # "vllm>=0.8.5",
11
+ # ]
12
+ #
13
+ # ///
14
+ """
15
+ Generate responses for prompts in a dataset using vLLM for efficient GPU inference.
16
+
17
+ This script loads a dataset from Hugging Face Hub containing chat-formatted messages,
18
+ applies the model's chat template, generates responses using vLLM, and saves the
19
+ results back to the Hub with a comprehensive dataset card.
20
+
21
+ Example usage:
22
+ # Local execution with auto GPU detection
23
+ uv run generate-responses.py \\
24
+ username/input-dataset \\
25
+ username/output-dataset \\
26
+ --messages-column messages
27
+
28
+ # With custom model and sampling parameters
29
+ uv run generate-responses.py \\
30
+ username/input-dataset \\
31
+ username/output-dataset \\
32
+ --model-id meta-llama/Llama-3.1-8B-Instruct \\
33
+ --temperature 0.9 \\
34
+ --top-p 0.95 \\
35
+ --max-tokens 2048
36
+
37
+ # HF Jobs execution (see script output for full command)
38
+ hf jobs uv run --flavor a100x4 ...
39
+ """
40
+
41
+ import argparse
42
+ import logging
43
+ import os
44
+ import sys
45
+ from datetime import datetime
46
+ from typing import Optional
47
+
48
+ from datasets import load_dataset
49
+ from huggingface_hub import DatasetCard, get_token, login
50
+ from torch import cuda
51
+ from tqdm.auto import tqdm
52
+ from transformers import AutoTokenizer
53
+ from vllm import LLM, SamplingParams
54
+ from vllm.sampling_params import GuidedDecodingParams
55
+
56
# Enable HF Transfer for faster downloads
# NOTE: must be set before any hub download is triggered for it to take effect.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

# Module-wide logging: timestamped INFO-level messages.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
63
+
64
+
65
def check_gpu_availability() -> int:
    """Verify CUDA is present and return the number of visible GPUs.

    Logs one line per detected GPU (name and total memory in GB).
    Exits the process with status 1 when CUDA is unavailable, since the
    rest of the pipeline requires a GPU.
    """
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
        )
        sys.exit(1)

    gpu_count = cuda.device_count()
    for device_index in range(gpu_count):
        device_name = cuda.get_device_name(device_index)
        # total_memory is reported in bytes; convert to GiB for readability.
        memory_gb = cuda.get_device_properties(device_index).total_memory / 1024**3
        logger.info(f"GPU {device_index}: {device_name} with {memory_gb:.1f} GB memory")

    return gpu_count
81
+
82
+
83
def create_dataset_card(
    source_dataset: str,
    model_id: str,
    messages_column: str,
    prompt_column: Optional[str],
    sampling_params: SamplingParams,
    tensor_parallel_size: int,
    num_examples: int,
    generation_time: str,
    num_skipped: int = 0,
    max_model_len_used: Optional[int] = None,
) -> str:
    """Build the markdown dataset card documenting the generation run.

    Args:
        source_dataset: Hub ID of the input dataset.
        model_id: Hub ID of the model used for generation.
        messages_column: Column holding chat-formatted messages.
        prompt_column: Column holding plain-text prompts, if that mode was used.
        sampling_params: Sampling configuration to document.
        tensor_parallel_size: Number of GPUs used for tensor parallelism.
        num_examples: Total number of examples in the source dataset.
        generation_time: ISO timestamp of when generation started.
        num_skipped: Prompts skipped for exceeding the context window.
        max_model_len_used: Effective context length when filtering applied.

    Returns:
        The complete card (YAML front matter + markdown body) as a string.
    """
    # Optional section describing how many prompts were filtered out.
    if num_skipped > 0:
        skip_percentage = (num_skipped / num_examples) * 100
        num_processed = num_examples - num_skipped
        filtering_section = f"""

### Filtering Statistics

- **Total Examples**: {num_examples:,}
- **Processed**: {num_processed:,} ({100 - skip_percentage:.1f}%)
- **Skipped (too long)**: {num_skipped:,} ({skip_percentage:.1f}%)
- **Max Model Length Used**: {max_model_len_used:,} tokens

Note: Prompts exceeding the maximum model length were skipped and have empty responses."""
    else:
        filtering_section = ""

    # Pre-compute the pieces that depend on which input mode was used.
    input_column = prompt_column if prompt_column else messages_column
    input_kind = "plain text prompts" if prompt_column else "chat messages"
    if prompt_column:
        column_flag = "--prompt-column " + prompt_column
    else:
        column_flag = "--messages-column " + messages_column
    max_len_flag = (
        f" \\\\\n  --max-model-len {max_model_len_used}" if max_model_len_used else ""
    )

    return f"""---
tags:
- generated
- vllm
- uv-script
---

# Generated Responses Dataset

This dataset contains generated responses for prompts from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}).

## Generation Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Input Column**: `{input_column}` ({input_kind})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}{filtering_section}

### Sampling Parameters

- **Temperature**: {sampling_params.temperature}
- **Top P**: {sampling_params.top_p}
- **Top K**: {sampling_params.top_k}
- **Min P**: {sampling_params.min_p}
- **Max Tokens**: {sampling_params.max_tokens}
- **Repetition Penalty**: {sampling_params.repetition_penalty}

### Hardware Configuration

- **Tensor Parallel Size**: {tensor_parallel_size}
- **GPU Configuration**: {tensor_parallel_size} GPU(s)

## Dataset Structure

The dataset contains all columns from the source dataset plus:
- `response`: The generated response from the model

## Generation Script

Generated using the vLLM inference script from [uv-scripts/vllm](https://huggingface.co/datasets/uv-scripts/vllm).

To reproduce this generation:

```bash
uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
  {source_dataset} \\
  <output-dataset> \\
  --model-id {model_id} \\
  {column_flag} \\
  --temperature {sampling_params.temperature} \\
  --top-p {sampling_params.top_p} \\
  --top-k {sampling_params.top_k} \\
  --max-tokens {sampling_params.max_tokens}{max_len_flag}
```
"""
167
+
168
+
169
def main(
    src_dataset_hub_id: str,
    output_dataset_hub_id: str,
    model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
    messages_column: str = "messages",
    prompt_column: Optional[str] = None,
    output_column: str = "response",
    temperature: float = 0.7,
    top_p: float = 0.8,
    top_k: int = 20,
    min_p: float = 0.0,
    max_tokens: int = 16384,
    repetition_penalty: float = 1.0,
    gpu_memory_utilization: float = 0.90,
    max_model_len: Optional[int] = None,
    tensor_parallel_size: Optional[int] = None,
    skip_long_prompts: bool = True,
    max_samples: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline.

    Args:
        src_dataset_hub_id: Input dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        model_id: Hugging Face model ID for generation
        messages_column: Column name containing chat messages
        prompt_column: Column name containing plain text prompts (alternative to messages_column)
        output_column: Column name for generated responses
        temperature: Sampling temperature
        top_p: Top-p sampling parameter
        top_k: Top-k sampling parameter
        min_p: Minimum probability threshold
        max_tokens: Maximum tokens to generate
        repetition_penalty: Repetition penalty parameter
        gpu_memory_utilization: GPU memory utilization factor
        max_model_len: Maximum model context length (None uses model default)
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        skip_long_prompts: Skip prompts exceeding max_model_len instead of failing
        max_samples: Maximum number of samples to process (None for all)
        hf_token: Hugging Face authentication token
    """
    generation_start_time = datetime.now().isoformat()

    # GPU check and configuration
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(
            f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
        )
    else:
        logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
        if tensor_parallel_size > num_gpus:
            logger.warning(
                f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
            )

    # Authentication - try multiple methods (explicit arg > env var > cached login)
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Initialize vLLM
    logger.info(f"Loading model: {model_id}")
    vllm_kwargs = {
        "model": model_id,
        "tensor_parallel_size": tensor_parallel_size,
        "gpu_memory_utilization": gpu_memory_utilization,
    }
    if max_model_len is not None:
        vllm_kwargs["max_model_len"] = max_model_len
        logger.info(f"Using max_model_len={max_model_len}")

    llm = LLM(**vllm_kwargs)

    # Load tokenizer for chat template
    logger.info("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Create sampling parameters.
    # BUG FIX: the original passed guided_decoding=guided_decoding_params, but
    # no such variable was ever defined anywhere in this script, so every run
    # crashed with a NameError. No guided decoding is configured here, so the
    # argument is simply dropped.
    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
    )

    # Load dataset
    logger.info(f"Loading dataset: {src_dataset_hub_id}")
    dataset = load_dataset(src_dataset_hub_id, split="train")

    # Apply max_samples if specified
    if max_samples is not None and max_samples < len(dataset):
        logger.info(f"Limiting dataset to {max_samples} samples")
        dataset = dataset.select(range(max_samples))

    total_examples = len(dataset)
    logger.info(f"Dataset loaded with {total_examples:,} examples")

    # Determine which column to use and validate
    if prompt_column:
        # Use prompt column mode
        if prompt_column not in dataset.column_names:
            logger.error(
                f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using prompt column mode with column: '{prompt_column}'")
        use_messages = False
    else:
        # Use messages column mode
        if messages_column not in dataset.column_names:
            logger.error(
                f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using messages column mode with column: '{messages_column}'")
        use_messages = True

    # Get effective max length for filtering
    if max_model_len is not None:
        effective_max_len = max_model_len
    else:
        # Get model's default max length
        effective_max_len = llm.llm_engine.model_config.max_model_len
    logger.info(f"Using effective max model length: {effective_max_len}")

    # Process messages and apply chat template
    logger.info("Preparing prompts...")
    all_prompts = []
    valid_prompts = []
    valid_indices = []
    skipped_info = []  # (original index, token count) for over-length prompts

    for i, example in enumerate(tqdm(dataset, desc="Processing prompts")):
        if use_messages:
            # Messages mode: use existing chat messages
            messages = example[messages_column]
        else:
            # Prompt mode: convert plain text to messages format
            messages = [{"role": "user", "content": example[prompt_column]}]
        # Apply chat template
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        all_prompts.append(prompt)

        # Count tokens if filtering is enabled; over-length prompts would
        # otherwise make vLLM fail the whole batch.
        if skip_long_prompts:
            tokens = tokenizer.encode(prompt)
            if len(tokens) <= effective_max_len:
                valid_prompts.append(prompt)
                valid_indices.append(i)
            else:
                skipped_info.append((i, len(tokens)))
        else:
            valid_prompts.append(prompt)
            valid_indices.append(i)

    # Log filtering results
    if skip_long_prompts and skipped_info:
        logger.warning(
            f"Skipped {len(skipped_info)} prompts that exceed max_model_len ({effective_max_len} tokens)"
        )
        logger.info("Skipped prompt details (first 10):")
        for idx, (prompt_idx, token_count) in enumerate(skipped_info[:10]):
            logger.info(
                f"  - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})"
            )
        if len(skipped_info) > 10:
            logger.info(f"  ... and {len(skipped_info) - 10} more")

        skip_percentage = (len(skipped_info) / total_examples) * 100
        if skip_percentage > 10:
            logger.warning(f"WARNING: {skip_percentage:.1f}% of prompts were skipped!")

    if not valid_prompts:
        logger.error("No valid prompts to process after filtering!")
        sys.exit(1)

    # Generate responses - vLLM handles batching internally
    logger.info(f"Starting generation for {len(valid_prompts):,} valid prompts...")
    logger.info("vLLM will handle batching and scheduling automatically")

    outputs = llm.generate(valid_prompts, sampling_params)

    # Extract generated text and create full response list.
    # Skipped examples keep an empty-string response so row count matches.
    logger.info("Extracting generated responses...")
    responses = [""] * total_examples  # Initialize with empty strings

    for idx, output in enumerate(outputs):
        original_idx = valid_indices[idx]
        response = output.outputs[0].text.strip()
        responses[original_idx] = response

    # Add responses to dataset
    logger.info("Adding responses to dataset...")
    dataset = dataset.add_column(output_column, responses)

    # Create dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=src_dataset_hub_id,
        model_id=model_id,
        messages_column=messages_column,
        prompt_column=prompt_column,
        sampling_params=sampling_params,
        tensor_parallel_size=tensor_parallel_size,
        num_examples=total_examples,
        generation_time=generation_start_time,
        num_skipped=len(skipped_info) if skip_long_prompts else 0,
        max_model_len_used=effective_max_len if skip_long_prompts else None,
    )

    # Push dataset to hub
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    # Push dataset card
    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )
414
+
415
+
416
if __name__ == "__main__":
    # With CLI arguments: parse them and run the pipeline.
    # Without arguments: print usage guidance (incl. an HF Jobs example) and exit.
    if len(sys.argv) > 1:
        parser = argparse.ArgumentParser(
            description="Generate responses for dataset prompts using vLLM",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Basic usage with default Qwen model
  uv run generate-responses.py input-dataset output-dataset

  # With custom model and parameters
  uv run generate-responses.py input-dataset output-dataset \\
    --model-id meta-llama/Llama-3.1-8B-Instruct \\
    --temperature 0.9 \\
    --max-tokens 2048

  # Force specific GPU configuration
  uv run generate-responses.py input-dataset output-dataset \\
    --tensor-parallel-size 2 \\
    --gpu-memory-utilization 0.95

  # Using environment variable for token
  HF_TOKEN=hf_xxx uv run generate-responses.py input-dataset output-dataset
        """,
        )

        # Positional arguments: source and destination dataset IDs on the Hub.
        parser.add_argument(
            "src_dataset_hub_id",
            help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
        )
        parser.add_argument(
            "output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
        )
        # Model / column selection options.
        parser.add_argument(
            "--model-id",
            type=str,
            default="Qwen/Qwen3-30B-A3B-Instruct-2507",
            help="Model to use for generation (default: Qwen3-30B-A3B-Instruct-2507)",
        )
        parser.add_argument(
            "--messages-column",
            type=str,
            default="messages",
            help="Column containing chat messages (default: messages)",
        )
        parser.add_argument(
            "--prompt-column",
            type=str,
            help="Column containing plain text prompts (alternative to --messages-column)",
        )
        parser.add_argument(
            "--output-column",
            type=str,
            default="response",
            help="Column name for generated responses (default: response)",
        )
        parser.add_argument(
            "--max-samples",
            type=int,
            help="Maximum number of samples to process (default: all)",
        )
        # Sampling options (forwarded to vLLM SamplingParams).
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.7,
            help="Sampling temperature (default: 0.7)",
        )
        parser.add_argument(
            "--top-p",
            type=float,
            default=0.8,
            help="Top-p sampling parameter (default: 0.8)",
        )
        parser.add_argument(
            "--top-k",
            type=int,
            default=20,
            help="Top-k sampling parameter (default: 20)",
        )
        parser.add_argument(
            "--min-p",
            type=float,
            default=0.0,
            help="Minimum probability threshold (default: 0.0)",
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=16384,
            help="Maximum tokens to generate (default: 16384)",
        )
        parser.add_argument(
            "--repetition-penalty",
            type=float,
            default=1.0,
            help="Repetition penalty (default: 1.0)",
        )
        # Hardware / engine options.
        parser.add_argument(
            "--gpu-memory-utilization",
            type=float,
            default=0.90,
            help="GPU memory utilization factor (default: 0.90)",
        )
        parser.add_argument(
            "--max-model-len",
            type=int,
            help="Maximum model context length (default: model's default)",
        )
        parser.add_argument(
            "--tensor-parallel-size",
            type=int,
            help="Number of GPUs to use (default: auto-detect)",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )
        # Paired flags implementing a default-on boolean:
        # --skip-long-prompts (default) / --no-skip-long-prompts to disable.
        parser.add_argument(
            "--skip-long-prompts",
            action="store_true",
            default=True,
            help="Skip prompts that exceed max_model_len instead of failing (default: True)",
        )
        parser.add_argument(
            "--no-skip-long-prompts",
            dest="skip_long_prompts",
            action="store_false",
            help="Fail on prompts that exceed max_model_len",
        )

        args = parser.parse_args()

        main(
            src_dataset_hub_id=args.src_dataset_hub_id,
            output_dataset_hub_id=args.output_dataset_hub_id,
            model_id=args.model_id,
            messages_column=args.messages_column,
            prompt_column=args.prompt_column,
            output_column=args.output_column,
            temperature=args.temperature,
            top_p=args.top_p,
            top_k=args.top_k,
            min_p=args.min_p,
            max_tokens=args.max_tokens,
            repetition_penalty=args.repetition_penalty,
            gpu_memory_utilization=args.gpu_memory_utilization,
            max_model_len=args.max_model_len,
            tensor_parallel_size=args.tensor_parallel_size,
            skip_long_prompts=args.skip_long_prompts,
            max_samples=args.max_samples,
            hf_token=args.hf_token,
        )
    else:
        # Show HF Jobs example when run without arguments
        print("""
vLLM Response Generation Script
==============================

This script requires arguments. For usage information:
  uv run generate-responses.py --help

Example HF Jobs command with multi-GPU:
  # If you're logged in with huggingface-cli, token will be auto-detected
  hf jobs uv run \\
    --flavor l4x4 \\
    https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-responses.py \\
    username/input-dataset \\
    username/output-dataset \\
    --messages-column messages \\
    --model-id Qwen/Qwen3-30B-A3B-Instruct-2507 \\
    --temperature 0.7 \\
    --max-tokens 16384
        """)