tytodd committed on
Commit
2508de4
·
verified ·
1 Parent(s): 47b1372

Upload generate_responses.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. generate_responses.py +15 -0
generate_responses.py CHANGED
@@ -67,6 +67,10 @@ def extract_output_payload(output) -> dict[str, Optional[str]]:
67
  """Convert a vLLM chat result into a dataset-serializable dict."""
68
  completion = output.outputs[0] if getattr(output, "outputs", None) else None
69
  text = getattr(completion, "text", "") if completion is not None else ""
 
 
 
 
70
 
71
  reasoning_content = None
72
  if completion is not None:
@@ -189,6 +193,7 @@ def main(
189
  src_dataset_hub_id: str,
190
  output_dataset_hub_id: str,
191
  model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
 
192
  messages_column: str = "messages",
193
  prompt_column: Optional[str] = None,
194
  output_column: str = "outputs",
@@ -213,6 +218,7 @@ def main(
213
  src_dataset_hub_id: Input dataset on Hugging Face Hub
214
  output_dataset_hub_id: Where to save results on Hugging Face Hub
215
  model_id: Hugging Face model ID for generation
 
216
  messages_column: Column name containing chat messages
217
  prompt_column: Column name containing plain text prompts (alternative to messages_column)
218
  output_column: Column name for generated responses
@@ -265,6 +271,9 @@ def main(
265
  if max_model_len is not None:
266
  vllm_kwargs["max_model_len"] = max_model_len
267
  logger.info(f"Using max_model_len={max_model_len}")
 
 
 
268
 
269
  llm = LLM(**vllm_kwargs)
270
 
@@ -418,6 +427,11 @@ Examples:
418
  default="messages",
419
  help="Column containing chat messages (default: messages)",
420
  )
 
 
 
 
 
421
  parser.add_argument(
422
  "--prompt-column",
423
  type=str,
@@ -516,6 +530,7 @@ Examples:
516
  src_dataset_hub_id=args.src_dataset_hub_id,
517
  output_dataset_hub_id=args.output_dataset_hub_id,
518
  model_id=args.model_id,
 
519
  messages_column=args.messages_column,
520
  prompt_column=args.prompt_column,
521
  output_column=args.output_column,
 
67
  """Convert a vLLM chat result into a dataset-serializable dict."""
68
  completion = output.outputs[0] if getattr(output, "outputs", None) else None
69
  text = getattr(completion, "text", "") if completion is not None else ""
70
+ print("COMPLETION", completion)
71
+ print("hasattr reasoning_content", hasattr(completion, "reasoning_content"))
72
+ print("hasattr reasoning", hasattr(completion, "reasoning"))
73
+ print("completion attrs", dir(completion))
74
 
75
  reasoning_content = None
76
  if completion is not None:
 
193
  src_dataset_hub_id: str,
194
  output_dataset_hub_id: str,
195
  model_id: str = "Qwen/Qwen3-30B-A3B-Instruct-2507",
196
+ reasoning_parser: Optional[str] = None,
197
  messages_column: str = "messages",
198
  prompt_column: Optional[str] = None,
199
  output_column: str = "outputs",
 
218
  src_dataset_hub_id: Input dataset on Hugging Face Hub
219
  output_dataset_hub_id: Where to save results on Hugging Face Hub
220
  model_id: Hugging Face model ID for generation
221
+ reasoning_parser: Optional vLLM reasoning parser to enable structured reasoning extraction
222
  messages_column: Column name containing chat messages
223
  prompt_column: Column name containing plain text prompts (alternative to messages_column)
224
  output_column: Column name for generated responses
 
271
  if max_model_len is not None:
272
  vllm_kwargs["max_model_len"] = max_model_len
273
  logger.info(f"Using max_model_len={max_model_len}")
274
+ if reasoning_parser is not None:
275
+ vllm_kwargs["reasoning_parser"] = reasoning_parser
276
+ logger.info(f"Using reasoning_parser={reasoning_parser}")
277
 
278
  llm = LLM(**vllm_kwargs)
279
 
 
427
  default="messages",
428
  help="Column containing chat messages (default: messages)",
429
  )
430
+ parser.add_argument(
431
+ "--reasoning-parser",
432
+ type=str,
433
+ help="vLLM reasoning parser to use for supported models",
434
+ )
435
  parser.add_argument(
436
  "--prompt-column",
437
  type=str,
 
530
  src_dataset_hub_id=args.src_dataset_hub_id,
531
  output_dataset_hub_id=args.output_dataset_hub_id,
532
  model_id=args.model_id,
533
+ reasoning_parser=args.reasoning_parser,
534
  messages_column=args.messages_column,
535
  prompt_column=args.prompt_column,
536
  output_column=args.output_column,