# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "hf-transfer",
#     "huggingface-hub",
#     "openai-harmony",
#     "torch",
#     "tqdm",
#     "vllm",
# ]
# ///

| | """ |
| | Generate responses with transparent reasoning using OpenAI GPT OSS models with harmony format. |
| | |
| | This script uses the official openai_harmony library for proper message formatting |
| | and channel parsing, as recommended in the OpenAI cookbook. |
| | |
| | Example usage: |
| | # Generate haiku with reasoning |
| | uv run gpt_oss_vllm_harmony.py \\ |
| | --input-dataset davanstrien/haiku_dpo \\ |
| | --output-dataset username/haiku-reasoning \\ |
| | --prompt-column question |
| | |
| | # Any prompt dataset with custom settings |
| | uv run gpt_oss_vllm_harmony.py \\ |
| | --input-dataset username/prompts \\ |
| | --output-dataset username/responses-with-reasoning \\ |
| | --prompt-column prompt \\ |
| | --reasoning-level high \\ |
| | --max-samples 100 |
| | |
| | # HF Jobs execution |
| | hf jobs uv run --flavor a10g-small \\ |
| | https://huggingface.co/datasets/uv-scripts/openai-reasoning/raw/main/gpt_oss_vllm_harmony.py \\ |
| | --input-dataset username/prompts \\ |
| | --output-dataset username/responses-with-reasoning |
| | """ |

import argparse
import json
import logging
import os
import sys
import time
from datetime import datetime
from typing import Dict, List, Optional

from datasets import Dataset, load_dataset
from huggingface_hub import DatasetCard, get_token, login
from openai_harmony import (
    HarmonyEncodingName,
    load_harmony_encoding,
    Conversation,
    Message,
    Role,
    SystemContent,
    DeveloperContent,
)
from torch import cuda
from tqdm.auto import tqdm
from vllm import LLM, SamplingParams
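
# Enable fast Hub downloads via the hf_transfer backend (must be set before any download)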
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

# Log at DEBUG level so the [VERBOSE] diagnostics below are visible
logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def check_gpu_availability() -> int:
    """Check if CUDA is available and return the number of GPUs."""
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
        )
        sys.exit(1)

    num_gpus = cuda.device_count()
    for i in range(num_gpus):
        gpu_name = cuda.get_device_name(i)
        gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
        logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")

    return num_gpus


def parse_harmony_messages(entries: List, prompt: str) -> Dict[str, str]:
    """
    Parse harmony message entries into think/content structure.

    The harmony format produces structured messages with different channels:
    - analysis: Chain of thought reasoning
    - final: User-facing response
    - commentary: Tool calls (if any)
    """
    think = ""
    content = ""

    logger.debug(f"[VERBOSE] Parsing {len(entries)} harmony entries")

    for i, entry in enumerate(entries):
        entry_dict = entry.to_dict()
        logger.debug(f"[VERBOSE] Entry {i}: {json.dumps(entry_dict, indent=2)}")
        channel = entry_dict.get("channel", "")
        is_analysis = channel == "analysis" or (not channel and i == 0)

        if "content" in entry_dict:
            if isinstance(entry_dict["content"], list):
                for content_item in entry_dict["content"]:
                    if content_item.get("type") == "text":
                        text = content_item.get("text", "")
                        if is_analysis:
                            think += text + "\n"
                        else:
                            content += text + "\n"
            elif isinstance(entry_dict["content"], str):
                if is_analysis:
                    think = entry_dict["content"]
                else:
                    content = entry_dict["content"]

    think = think.strip()
    content = content.strip()

    # Fallback: if parsing produced nothing, keep the raw first entry so the
    # sample is still inspectable downstream
    if not think and not content and entries:
        content = str(entries[0].to_dict())

    return {
        "prompt": prompt,
        "think": think,
        "content": content,
        "raw_output": json.dumps([e.to_dict() for e in entries], indent=2),
    }


def create_dataset_card(
    input_dataset: str,
    model_id: str,
    prompt_column: str,
    reasoning_level: str,
    num_examples: int,
    generation_time: str,
    tensor_parallel_size: int,
    temperature: float,
    max_tokens: int,
) -> str:
    """Create a dataset card documenting the generation process."""
    return f"""---
tags:
- generated
- synthetic
- reasoning
- openai-gpt-oss
- harmony-format
---

# Generated Responses with Reasoning (Harmony Format)

This dataset contains AI-generated responses with transparent chain-of-thought reasoning, generated with OpenAI GPT OSS models using the official harmony format.

## Generation Details

- **Source Dataset**: [{input_dataset}](https://huggingface.co/datasets/{input_dataset})
- **Model**: [{model_id}](https://huggingface.co/{model_id})
- **Reasoning Level**: {reasoning_level}
- **Number of Examples**: {num_examples:,}
- **Generation Date**: {generation_time}
- **Temperature**: {temperature}
- **Max Tokens**: {max_tokens}
- **Tensor Parallel Size**: {tensor_parallel_size}
- **Format**: Official OpenAI Harmony format

## Dataset Structure

Each example contains:
- `prompt`: The input prompt from the source dataset
- `think`: The model's internal reasoning process (analysis channel)
- `content`: The final response (final channel)
- `raw_output`: Complete harmony format output
- `reasoning_level`: The reasoning effort level used
- `model`: Model identifier

## Generation Script

Generated using [uv-scripts/openai-reasoning](https://huggingface.co/datasets/uv-scripts/openai-reasoning) with the official harmony format.

To reproduce:
```bash
uv run gpt_oss_vllm_harmony.py \\
    --input-dataset {input_dataset} \\
    --output-dataset <your-dataset> \\
    --prompt-column {prompt_column} \\
    --model-id {model_id} \\
    --reasoning-level {reasoning_level}
```
"""


def main(
    input_dataset: str,
    output_dataset_hub_id: str,
    prompt_column: str = "prompt",
    model_id: str = "openai/gpt-oss-20b",
    reasoning_level: str = "high",
    max_samples: Optional[int] = None,
    temperature: float = 0.7,
    max_tokens: int = 512,
    gpu_memory_utilization: float = 0.90,
    tensor_parallel_size: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main generation pipeline using the official harmony format.

    Args:
        input_dataset: Source dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        prompt_column: Column containing the prompts
        model_id: OpenAI GPT OSS model to use
        reasoning_level: Reasoning effort level (high/medium/low)
        max_samples: Maximum number of samples to process
        temperature: Sampling temperature
        max_tokens: Maximum tokens to generate
        gpu_memory_utilization: GPU memory utilization factor
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        hf_token: Hugging Face authentication token
    """
    generation_start_time = datetime.now().isoformat()

    # Detect GPUs and default tensor parallelism to one shard per GPU
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(
            f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
        )

    # Resolve authentication: explicit argument, then env var, then cached login
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login'")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    logger.info("Loading harmony encoding...")
    encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
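
    # Harmony defines the token ids that end an assistant action; passing them
    # to vLLM as stop tokens ends generation cleanly at message boundaries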
    stop_token_ids = encoding.stop_tokens_for_assistant_actions()
    logger.info(f"[VERBOSE] Harmony stop token IDs: {stop_token_ids}")

    logger.info(f"Loading model: {model_id}")
    logger.info("Note: vLLM will handle batching automatically for optimal throughput")
    try:
        llm = LLM(
            model=model_id,
            tensor_parallel_size=tensor_parallel_size,
            gpu_memory_utilization=gpu_memory_utilization,
            trust_remote_code=True,
            dtype="bfloat16",
        )
        logger.info("[VERBOSE] Model loaded successfully")
    except Exception as e:
        logger.error(f"Failed to load model with vLLM: {e}")
        if "mxfp4" in str(e).lower():
            logger.error("This appears to be a quantization format issue.")
            logger.error("The model uses mxfp4 quantization which requires specific support.")
        sys.exit(1)

    # Sampling parameters shared across all prompts
    sampling_params = SamplingParams(
        temperature=temperature,
        max_tokens=max_tokens,
        stop_token_ids=stop_token_ids,
    )
    logger.info(f"[VERBOSE] Sampling params: temp={temperature}, max_tokens={max_tokens}")

    # Load the source dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split="train")

    # Validate the prompt column
    if prompt_column not in dataset.column_names:
        logger.error(
            f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
        )
        sys.exit(1)

    # Optionally limit the number of samples
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
    total_examples = len(dataset)
    logger.info(f"Processing {total_examples:,} examples")

    logger.info(f"Preparing prompts with harmony format and reasoning_level={reasoning_level}...")
    prefill_ids_list = []
    prompts = []

    for i, example in enumerate(tqdm(dataset, desc="Preparing prompts")):
        prompt_text = example[prompt_column]
        prompts.append(prompt_text)
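
        # Build a harmony conversation: system message, developer message
        # carrying the reasoning-effort instruction, then the user prompt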
        developer_content = DeveloperContent.new()
        if reasoning_level:
            developer_content = developer_content.with_instructions(
                f"Reasoning: {reasoning_level}"
            )

        convo = Conversation.from_messages([
            Message.from_role_and_content(Role.SYSTEM, SystemContent.new()),
            Message.from_role_and_content(Role.DEVELOPER, developer_content),
            Message.from_role_and_content(Role.USER, prompt_text),
        ])
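
        # Render the conversation up to the assistant turn; generation
        # continues from these prefill token ids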
        prefill_ids = encoding.render_conversation_for_completion(convo, Role.ASSISTANT)
        prefill_ids_list.append(prefill_ids)

        # Log the first few examples for debugging
        if i < 10:
            logger.info(f"[VERBOSE] Example {i} original text: {prompt_text[:200]}...")
            logger.info(f"[VERBOSE] Example {i} prefill length: {len(prefill_ids)} tokens")

    logger.info(f"Starting generation for {len(prefill_ids_list):,} prompts...")
    logger.info("[VERBOSE] Using prompt_token_ids for generation")
    start_time = time.time()
    outputs = llm.generate(
        prompt_token_ids=prefill_ids_list,
        sampling_params=sampling_params,
    )
    end_time = time.time()

    generation_time = end_time - start_time
    logger.info("\n[VERBOSE] Generation Performance Metrics:")
    logger.info(f"[VERBOSE] - Total time: {generation_time:.2f} seconds")
    logger.info(f"[VERBOSE] - Throughput: {len(outputs) / generation_time:.2f} prompts/second")
    logger.info(f"[VERBOSE] - Average time per prompt: {generation_time / len(outputs):.2f} seconds")

    # Parse each completion back into think/content using the harmony channels
    logger.info("Parsing generated outputs with harmony format...")
    results = []

    parse_stats = {"success": 0, "empty": 0, "error": 0}

    for i, output in enumerate(tqdm(outputs, desc="Parsing outputs")):
        gen = output.outputs[0]
        text = gen.text
        output_tokens = gen.token_ids

        logger.debug(f"[VERBOSE] Output {i}: {len(output_tokens)} tokens, {len(text)} chars")
        try:
            entries = encoding.parse_messages_from_completion_tokens(output_tokens, Role.ASSISTANT)
            parsed = parse_harmony_messages(entries, prompts[i])

            if parsed["think"] or parsed["content"]:
                parse_stats["success"] += 1
            else:
                parse_stats["empty"] += 1

            # Log detailed output for the first few examples
            if i < 10:
                logger.info(f"\n[VERBOSE] ========== Example {i} Output ==========")
                logger.info(f"[VERBOSE] Original prompt: {prompts[i][:200]}...")
                logger.info(f"[VERBOSE] Raw text output: {text}")
                logger.info(f"[VERBOSE] Harmony entries: {len(entries)}")
                for j, entry in enumerate(entries):
                    logger.info(f"[VERBOSE] Entry {j}: {json.dumps(entry.to_dict(), indent=2)}")
                logger.info(f"[VERBOSE] Parsed think ({len(parsed['think'])} chars): {parsed['think'][:500]}...")
                logger.info(f"[VERBOSE] Parsed content ({len(parsed['content'])} chars): {parsed['content'][:500]}...")
                logger.info("[VERBOSE] ====================================\n")

        except Exception as e:
            logger.error(f"[VERBOSE] Error parsing output {i}: {e}")
            parse_stats["error"] += 1

            # Fall back to the raw text so the sample is not lost
            parsed = {
                "prompt": prompts[i],
                "think": "",
                "content": text,
                "raw_output": text,
            }

        result = {
            "prompt": parsed["prompt"],
            "think": parsed["think"],
            "content": parsed["content"],
            "raw_output": parsed["raw_output"],
            "reasoning_level": reasoning_level,
            "model": model_id,
        }
        results.append(result)

    logger.info("\n[VERBOSE] Parsing Statistics:")
    logger.info(f"[VERBOSE] - Successfully parsed: {parse_stats['success']} ({parse_stats['success']/len(outputs)*100:.1f}%)")
    logger.info(f"[VERBOSE] - Empty results: {parse_stats['empty']} ({parse_stats['empty']/len(outputs)*100:.1f}%)")
    logger.info(f"[VERBOSE] - Parse errors: {parse_stats['error']} ({parse_stats['error']/len(outputs)*100:.1f}%)")

    # Build and push the output dataset
    logger.info("Creating output dataset...")
    output_dataset = Dataset.from_list(results)

    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        input_dataset=input_dataset,
        model_id=model_id,
        prompt_column=prompt_column,
        reasoning_level=reasoning_level,
        num_examples=total_examples,
        generation_time=generation_start_time,
        tensor_parallel_size=tensor_parallel_size,
        temperature=temperature,
        max_tokens=max_tokens,
    )

    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    output_dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
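
    # Push the card separately so it renders on the Hub dataset page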
    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )

    # Final summary
    logger.info("\n[VERBOSE] ========== FINAL SUMMARY ==========")
    logger.info(f"[VERBOSE] Model: {model_id}")
    logger.info(f"[VERBOSE] Reasoning level: {reasoning_level}")
    logger.info(f"[VERBOSE] Examples processed: {total_examples}")
    logger.info(f"[VERBOSE] Temperature: {temperature}")
    logger.info(f"[VERBOSE] Max tokens: {max_tokens}")
    logger.info(f"[VERBOSE] GPU config: {tensor_parallel_size} GPU(s)")
    logger.info("[VERBOSE] ====================================")


| | if __name__ == "__main__": |
| | if len(sys.argv) > 1: |
| | parser = argparse.ArgumentParser( |
| | description="Generate responses with reasoning using OpenAI GPT OSS models (Harmony format)", |
| | formatter_class=argparse.RawDescriptionHelpFormatter, |
| | epilog=""" |
| | Examples: |
| | # Generate haiku with reasoning |
| | uv run gpt_oss_vllm_harmony.py \\ |
| | --input-dataset davanstrien/haiku_dpo \\ |
| | --output-dataset username/haiku-reasoning \\ |
| | --prompt-column question |
| | |
| | # Any prompt dataset |
| | uv run gpt_oss_vllm_harmony.py \\ |
| | --input-dataset username/prompts \\ |
| | --output-dataset username/responses-reasoning \\ |
| | --reasoning-level high \\ |
| | --max-samples 100 |
| | |
| | # Use larger 120B model (requires 4x L40S GPUs) |
| | uv run gpt_oss_vllm_harmony.py \\ |
| | --input-dataset username/prompts \\ |
| | --output-dataset username/responses-reasoning \\ |
| | --model-id openai/gpt-oss-120b \\ |
| | --tensor-parallel-size 4 |
| | """, |
| | ) |
| |
|
        parser.add_argument(
            "--input-dataset",
            type=str,
            required=True,
            help="Input dataset on Hugging Face Hub",
        )
        parser.add_argument(
            "--output-dataset",
            type=str,
            required=True,
            help="Output dataset name on Hugging Face Hub",
        )
        parser.add_argument(
            "--prompt-column",
            type=str,
            default="prompt",
            help="Column containing prompts (default: prompt)",
        )
        parser.add_argument(
            "--model-id",
            type=str,
            default="openai/gpt-oss-20b",
            help="Model to use (default: openai/gpt-oss-20b)",
        )
        parser.add_argument(
            "--reasoning-level",
            type=str,
            choices=["high", "medium", "low"],
            default="high",
            help="Reasoning effort level (default: high)",
        )
        parser.add_argument(
            "--max-samples", type=int, help="Maximum number of samples to process"
        )
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.7,
            help="Sampling temperature (default: 0.7)",
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=512,
            help="Maximum tokens to generate (default: 512)",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            type=float,
            default=0.90,
            help="GPU memory utilization (default: 0.90)",
        )
        parser.add_argument(
            "--tensor-parallel-size",
            type=int,
            help="Number of GPUs to use (default: auto-detect)",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )

        args = parser.parse_args()

        main(
            input_dataset=args.input_dataset,
            output_dataset_hub_id=args.output_dataset,
            prompt_column=args.prompt_column,
            model_id=args.model_id,
            reasoning_level=args.reasoning_level,
            max_samples=args.max_samples,
            temperature=args.temperature,
            max_tokens=args.max_tokens,
            gpu_memory_utilization=args.gpu_memory_utilization,
            tensor_parallel_size=args.tensor_parallel_size,
            hf_token=args.hf_token,
        )
    else:
        # Show usage information when run without arguments
        print("""
OpenAI GPT OSS Reasoning Generation Script (Harmony Format)
===========================================================

This script requires arguments. For usage information:
    uv run gpt_oss_vllm_harmony.py --help

Example HF Jobs command for the 20B model (requires ~40GB GPU memory):
    hf jobs uv run \\
        --flavor a10g-large \\
        https://huggingface.co/datasets/uv-scripts/openai-reasoning/raw/main/gpt_oss_vllm_harmony.py \\
        --input-dataset davanstrien/haiku_dpo \\
        --output-dataset username/haiku-reasoning \\
        --prompt-column question \\
        --reasoning-level high

Example HF Jobs command for the 120B model (requires ~240GB GPU memory):
    hf jobs uv run \\
        --flavor l40s-4x \\
        https://huggingface.co/datasets/uv-scripts/openai-reasoning/raw/main/gpt_oss_vllm_harmony.py \\
        --input-dataset username/prompts \\
        --output-dataset username/responses-reasoning \\
        --model-id openai/gpt-oss-120b \\
        --reasoning-level high
""")