sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
ocrmypdf/OCRmyPDF:tests/test_ocr_engine_interface.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for OcrEngine interface extensions.
These tests verify that the OcrEngine ABC has the new generate_ocr() method
and that OcrElement classes are exported from the public API.
"""
from __future__ import annotations
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from ocrmypdf.pluginspec import OcrEngine
class TestOcrEngineInterface:
"""Test that OcrEngine ABC has required methods."""
def test_generate_ocr_method_exists(self):
"""OcrEngine must have generate_ocr() method signature."""
assert hasattr(OcrEngine, 'generate_ocr')
def test_supports_generate_ocr_method_exists(self):
"""OcrEngine must have supports_generate_ocr() method."""
assert hasattr(OcrEngine, 'supports_generate_ocr')
def test_supports_generate_ocr_default_false(self):
"""Default supports_generate_ocr() should return False."""
from ocrmypdf.pluginspec import OrientationConfidence
# Create a minimal concrete implementation
class MinimalEngine(OcrEngine):
@staticmethod
def version():
return "1.0"
@staticmethod
def creator_tag(options):
return "test"
def __str__(self):
return "test"
@staticmethod
def languages(options):
return set()
@staticmethod
def get_orientation(input_file, options):
return OrientationConfidence(0, 0.0)
@staticmethod
def get_deskew(input_file, options):
return 0.0
@staticmethod
def generate_hocr(input_file, output_hocr, output_text, options):
pass
@staticmethod
def generate_pdf(input_file, output_pdf, output_text, options):
pass
engine = MinimalEngine()
assert engine.supports_generate_ocr() is False
def test_generate_ocr_raises_not_implemented_by_default(self):
"""Default generate_ocr() should raise NotImplementedError."""
from ocrmypdf.pluginspec import OrientationConfidence
class MinimalEngine(OcrEngine):
@staticmethod
def version():
return "1.0"
@staticmethod
def creator_tag(options):
return "test"
def __str__(self):
return "test"
@staticmethod
def languages(options):
return set()
@staticmethod
def get_orientation(input_file, options):
return OrientationConfidence(0, 0.0)
@staticmethod
def get_deskew(input_file, options):
return 0.0
@staticmethod
def generate_hocr(input_file, output_hocr, output_text, options):
pass
@staticmethod
def generate_pdf(input_file, output_pdf, output_text, options):
pass
engine = MinimalEngine()
with pytest.raises(NotImplementedError):
engine.generate_ocr(Path("test.png"), MagicMock(), 0)
class TestOcrElementExport:
"""Test that OcrElement is exported from public API."""
def test_ocrelement_importable_from_ocrmypdf(self):
"""OcrElement should be importable from ocrmypdf package."""
from ocrmypdf import OcrElement
assert OcrElement is not None
def test_ocrclass_importable_from_ocrmypdf(self):
"""OcrClass should be importable from ocrmypdf package."""
from ocrmypdf import OcrClass
assert OcrClass is not None
def test_boundingbox_importable_from_ocrmypdf(self):
"""BoundingBox should be importable from ocrmypdf package."""
from ocrmypdf import BoundingBox
assert BoundingBox is not None
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_ocr_engine_interface.py",
"license": "Mozilla Public License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_ocr_engine_selection.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for OCR engine selection mechanism.
Tests verify that the --ocr-engine option works correctly and that
engine-specific options are available.
"""
from __future__ import annotations
import pytest
class TestOcrEngineCliOption:
"""Test --ocr-engine CLI option."""
def test_ocr_engine_option_exists(self):
"""CLI should have --ocr-engine option."""
from ocrmypdf.cli import get_parser
parser = get_parser()
option_strings = []
for action in parser._actions:
option_strings.extend(action.option_strings)
assert '--ocr-engine' in option_strings
def test_ocr_engine_accepts_tesseract(self):
"""--ocr-engine should accept 'tesseract'."""
from ocrmypdf.cli import get_parser
parser = get_parser()
args = parser.parse_args(['--ocr-engine', 'tesseract', 'in.pdf', 'out.pdf'])
assert args.ocr_engine == 'tesseract'
def test_ocr_engine_accepts_auto(self):
"""--ocr-engine should accept 'auto'."""
from ocrmypdf.cli import get_parser
parser = get_parser()
args = parser.parse_args(['--ocr-engine', 'auto', 'in.pdf', 'out.pdf'])
assert args.ocr_engine == 'auto'
def test_ocr_engine_accepts_none(self):
"""--ocr-engine should accept 'none'."""
from ocrmypdf.cli import get_parser
parser = get_parser()
args = parser.parse_args(['--ocr-engine', 'none', 'in.pdf', 'out.pdf'])
assert args.ocr_engine == 'none'
def test_ocr_engine_default_is_auto(self):
"""--ocr-engine should default to 'auto'."""
from ocrmypdf.cli import get_parser
parser = get_parser()
args = parser.parse_args(['in.pdf', 'out.pdf'])
assert args.ocr_engine == 'auto'
def test_ocr_engine_rejects_invalid(self):
"""--ocr-engine should reject invalid values."""
from ocrmypdf.cli import get_parser
parser = get_parser()
with pytest.raises(SystemExit):
parser.parse_args(['--ocr-engine', 'invalid_engine', 'in.pdf', 'out.pdf'])
class TestOcrEngineOptionsModel:
"""Test OcrOptions has ocr_engine field."""
def test_ocr_options_has_ocr_engine_field(self):
"""OcrOptions should have ocr_engine field."""
from ocrmypdf._options import OcrOptions
# Check field exists in model
assert 'ocr_engine' in OcrOptions.model_fields
class TestOcrEnginePluginSelection:
"""Test that get_ocr_engine() hook selects correct engine based on options."""
def test_tesseract_selected_when_auto(self):
"""TesseractOcrEngine should be returned when ocr_engine='auto'."""
from unittest.mock import MagicMock
from ocrmypdf.builtin_plugins import tesseract_ocr
from ocrmypdf.builtin_plugins.tesseract_ocr import TesseractOcrEngine
options = MagicMock()
options.ocr_engine = 'auto'
engine = tesseract_ocr.get_ocr_engine(options=options)
assert isinstance(engine, TesseractOcrEngine)
def test_tesseract_selected_when_tesseract(self):
"""TesseractOcrEngine should be returned when ocr_engine='tesseract'."""
from unittest.mock import MagicMock
from ocrmypdf.builtin_plugins import tesseract_ocr
from ocrmypdf.builtin_plugins.tesseract_ocr import TesseractOcrEngine
options = MagicMock()
options.ocr_engine = 'tesseract'
engine = tesseract_ocr.get_ocr_engine(options=options)
assert isinstance(engine, TesseractOcrEngine)
def test_null_selected_when_none(self):
"""NullOcrEngine should be returned when ocr_engine='none'."""
from unittest.mock import MagicMock
from ocrmypdf.builtin_plugins import null_ocr
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
options = MagicMock()
options.ocr_engine = 'none'
engine = null_ocr.get_ocr_engine(options=options)
assert isinstance(engine, NullOcrEngine)
def test_null_returns_none_when_auto(self):
"""null_ocr.get_ocr_engine() should return None when ocr_engine='auto'."""
from unittest.mock import MagicMock
from ocrmypdf.builtin_plugins import null_ocr
options = MagicMock()
options.ocr_engine = 'auto'
engine = null_ocr.get_ocr_engine(options=options)
assert engine is None
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_ocr_engine_selection.py",
"license": "Mozilla Public License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_pdf_renderer.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for Fpdf2PdfRenderer class."""
from __future__ import annotations
from io import StringIO
from pathlib import Path
import pytest
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from ocrmypdf.font import MultiFontManager
from ocrmypdf.fpdf_renderer import DebugRenderOptions, Fpdf2PdfRenderer
from ocrmypdf.helpers import check_pdf
from ocrmypdf.hocrtransform import (
Baseline,
BoundingBox,
OcrClass,
OcrElement,
)
def text_from_pdf(filename: Path) -> str:
"""Extract text from a PDF file using pdfminer."""
output_string = StringIO()
with open(filename, 'rb') as in_file:
parser = PDFParser(in_file)
doc = PDFDocument(parser)
rsrcmgr = PDFResourceManager()
device = TextConverter(rsrcmgr, output_string, laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.create_pages(doc):
interpreter.process_page(page)
return output_string.getvalue()
@pytest.fixture
def font_dir():
"""Get the font directory."""
return Path(__file__).parent.parent / "src" / "ocrmypdf" / "data"
@pytest.fixture
def multi_font_manager(font_dir):
"""Create a MultiFontManager for tests."""
return MultiFontManager(font_dir)
def create_simple_page(
width: float = 1000,
height: float = 500,
words: list[tuple[str, tuple[float, float, float, float]]] | None = None,
) -> OcrElement:
"""Create a simple OcrElement page for testing.
Args:
width: Page width in pixels
height: Page height in pixels
words: List of (text, (left, top, right, bottom)) tuples
Returns:
OcrElement representing the page
"""
if words is None:
words = [("Hello", (100, 100, 200, 150)), ("World", (250, 100, 350, 150))]
word_elements = [
OcrElement(
ocr_class=OcrClass.WORD,
text=text,
bbox=BoundingBox(left=bbox[0], top=bbox[1], right=bbox[2], bottom=bbox[3]),
)
for text, bbox in words
]
line = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.0, intercept=0),
children=word_elements,
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
direction="ltr",
language="eng",
children=[line],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=width, bottom=height),
children=[paragraph],
)
return page
class TestFpdf2PdfRendererBasic:
"""Basic Fpdf2PdfRenderer functionality tests."""
def test_render_simple_page(self, tmp_path, multi_font_manager):
"""Test rendering a simple page with two words."""
page = create_simple_page()
output_pdf = tmp_path / "simple.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
assert output_pdf.exists()
check_pdf(str(output_pdf))
def test_rendered_text_extractable(self, tmp_path, multi_font_manager):
"""Test that rendered text can be extracted from the PDF."""
page = create_simple_page()
output_pdf = tmp_path / "extractable.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
extracted_text = text_from_pdf(output_pdf)
assert "Hello" in extracted_text
assert "World" in extracted_text
def test_invisible_text_mode(self, tmp_path, multi_font_manager):
"""Test that invisible_text=True creates a valid PDF."""
page = create_simple_page()
output_pdf = tmp_path / "invisible.pdf"
renderer = Fpdf2PdfRenderer(
page=page,
dpi=72.0,
multi_font_manager=multi_font_manager,
invisible_text=True,
)
renderer.render(output_pdf)
# Text should still be extractable even when invisible
extracted_text = text_from_pdf(output_pdf)
assert "Hello" in extracted_text
def test_visible_text_mode(self, tmp_path, multi_font_manager):
"""Test that invisible_text=False creates a valid PDF with visible text."""
page = create_simple_page()
output_pdf = tmp_path / "visible.pdf"
renderer = Fpdf2PdfRenderer(
page=page,
dpi=72.0,
multi_font_manager=multi_font_manager,
invisible_text=False,
)
renderer.render(output_pdf)
# Text should be extractable
extracted_text = text_from_pdf(output_pdf)
assert "Hello" in extracted_text
class TestFpdf2PdfRendererPageSize:
"""Test page size calculations."""
def test_page_dimensions(self, tmp_path, multi_font_manager):
"""Test that page dimensions are calculated correctly."""
# 1000x500 pixels at 72 dpi = 1000x500 points
page = create_simple_page(width=1000, height=500)
output_pdf = tmp_path / "dimensions.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
assert renderer.coord_transform.page_width_pt == pytest.approx(1000.0)
assert renderer.coord_transform.page_height_pt == pytest.approx(500.0)
renderer.render(output_pdf)
def test_high_dpi_page(self, tmp_path, multi_font_manager):
"""Test page dimensions at higher DPI."""
# 720x360 pixels at 144 dpi = 360x180 points
page = create_simple_page(width=720, height=360)
output_pdf = tmp_path / "high_dpi.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=144.0, multi_font_manager=multi_font_manager
)
assert renderer.coord_transform.page_width_pt == pytest.approx(360.0)
assert renderer.coord_transform.page_height_pt == pytest.approx(180.0)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
class TestFpdf2PdfRendererMultiLine:
"""Test rendering of multi-line content."""
def test_multiple_lines(self, tmp_path, multi_font_manager):
"""Test rendering multiple lines of text."""
line1_words = [
OcrElement(
ocr_class=OcrClass.WORD,
text="Line",
bbox=BoundingBox(left=100, top=100, right=180, bottom=150),
),
OcrElement(
ocr_class=OcrClass.WORD,
text="one",
bbox=BoundingBox(left=190, top=100, right=250, bottom=150),
),
]
line1 = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.0, intercept=0),
children=line1_words,
)
line2_words = [
OcrElement(
ocr_class=OcrClass.WORD,
text="Line",
bbox=BoundingBox(left=100, top=200, right=180, bottom=250),
),
OcrElement(
ocr_class=OcrClass.WORD,
text="two",
bbox=BoundingBox(left=190, top=200, right=250, bottom=250),
),
]
line2 = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=200, right=900, bottom=250),
baseline=Baseline(slope=0.0, intercept=0),
children=line2_words,
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=250),
direction="ltr",
language="eng",
children=[line1, line2],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "multiline.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
extracted_text = text_from_pdf(output_pdf)
assert "Line" in extracted_text
assert "one" in extracted_text
assert "two" in extracted_text
class TestFpdf2PdfRendererTextDirection:
"""Test rendering of different text directions."""
def test_ltr_text(self, tmp_path, multi_font_manager):
"""Test rendering LTR text."""
page = create_simple_page()
output_pdf = tmp_path / "ltr.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
def test_rtl_text(self, tmp_path, multi_font_manager):
"""Test rendering RTL text."""
word = OcrElement(
ocr_class=OcrClass.WORD,
text="مرحبا",
bbox=BoundingBox(left=100, top=100, right=200, bottom=150),
)
line = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.0, intercept=0),
direction="rtl",
children=[word],
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
direction="rtl",
language="ara",
children=[line],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "rtl.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
class TestFpdf2PdfRendererBaseline:
"""Test baseline handling in rendering."""
def test_sloped_baseline(self, tmp_path, multi_font_manager):
"""Test rendering with a sloped baseline."""
word = OcrElement(
ocr_class=OcrClass.WORD,
text="Sloped",
bbox=BoundingBox(left=100, top=100, right=200, bottom=150),
)
line = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.02, intercept=-5),
children=[word],
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
direction="ltr",
language="eng",
children=[line],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "sloped.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
extracted_text = text_from_pdf(output_pdf)
assert "Sloped" in extracted_text
class TestFpdf2PdfRendererTextangle:
"""Test textangle (rotation) handling in rendering."""
def test_rotated_text(self, tmp_path, multi_font_manager):
"""Test rendering rotated text."""
word = OcrElement(
ocr_class=OcrClass.WORD,
text="Rotated",
bbox=BoundingBox(left=100, top=100, right=200, bottom=150),
)
line = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.0, intercept=0),
textangle=5.0,
children=[word],
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
direction="ltr",
language="eng",
children=[line],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "rotated.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
extracted_text = text_from_pdf(output_pdf)
assert "Rotated" in extracted_text
class TestFpdf2PdfRendererWordBreaks:
"""Test word rendering."""
def test_word_breaks_english(self, tmp_path, multi_font_manager):
"""Test that words are rendered for English text."""
page = create_simple_page()
output_pdf = tmp_path / "english.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
extracted_text = text_from_pdf(output_pdf)
# Words should be present
assert "Hello" in extracted_text
assert "World" in extracted_text
def test_cjk_text(self, tmp_path, multi_font_manager):
"""Test rendering CJK text."""
words = [
OcrElement(
ocr_class=OcrClass.WORD,
text="你好",
bbox=BoundingBox(left=100, top=100, right=150, bottom=150),
),
OcrElement(
ocr_class=OcrClass.WORD,
text="世界",
bbox=BoundingBox(left=160, top=100, right=210, bottom=150),
),
]
line = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.0, intercept=0),
children=words,
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
direction="ltr",
language="chi_sim", # Simplified Chinese
children=[line],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "chinese.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
class TestFpdf2PdfRendererDebugOptions:
"""Test debug rendering options."""
def test_debug_render_options_default(self, multi_font_manager):
"""Test that debug options are disabled by default."""
page = create_simple_page()
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
assert renderer.debug_options.render_baseline is False
assert renderer.debug_options.render_word_bbox is False
assert renderer.debug_options.render_line_bbox is False
def test_debug_render_options_enabled(self, tmp_path, multi_font_manager):
"""Test rendering with debug options enabled."""
page = create_simple_page()
output_pdf = tmp_path / "debug.pdf"
debug_opts = DebugRenderOptions(
render_baseline=True,
render_word_bbox=True,
render_line_bbox=True,
)
renderer = Fpdf2PdfRenderer(
page=page,
dpi=72.0,
multi_font_manager=multi_font_manager,
invisible_text=False,
debug_render_options=debug_opts,
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
# Text should still be extractable
extracted_text = text_from_pdf(output_pdf)
assert "Hello" in extracted_text
class TestFpdf2PdfRendererErrors:
"""Test error handling in Fpdf2PdfRenderer."""
def test_invalid_ocr_class(self, multi_font_manager):
"""Test that non-page elements are rejected."""
line = OcrElement(
ocr_class=OcrClass.LINE, bbox=BoundingBox(left=0, top=0, right=100, bottom=50)
)
with pytest.raises(ValueError, match="ocr_page"):
Fpdf2PdfRenderer(page=line, dpi=72.0, multi_font_manager=multi_font_manager)
def test_page_without_bbox(self, multi_font_manager):
"""Test that pages without bbox are rejected."""
page = OcrElement(ocr_class=OcrClass.PAGE)
with pytest.raises(ValueError, match="bounding box"):
Fpdf2PdfRenderer(page=page, dpi=72.0, multi_font_manager=multi_font_manager)
class TestFpdf2PdfRendererLineTypes:
"""Test rendering of different line types."""
def test_header_line(self, tmp_path, multi_font_manager):
"""Test rendering header lines."""
word = OcrElement(
ocr_class=OcrClass.WORD,
text="Header",
bbox=BoundingBox(left=100, top=50, right=200, bottom=100),
)
header = OcrElement(
ocr_class=OcrClass.HEADER,
bbox=BoundingBox(left=100, top=50, right=900, bottom=100),
baseline=Baseline(slope=0.0, intercept=0),
children=[word],
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=50, right=900, bottom=100),
direction="ltr",
language="eng",
children=[header],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "header.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
extracted_text = text_from_pdf(output_pdf)
assert "Header" in extracted_text
def test_caption_line(self, tmp_path, multi_font_manager):
"""Test rendering caption lines."""
word = OcrElement(
ocr_class=OcrClass.WORD,
text="Caption",
bbox=BoundingBox(left=100, top=300, right=200, bottom=350),
)
caption = OcrElement(
ocr_class=OcrClass.CAPTION,
bbox=BoundingBox(left=100, top=300, right=900, bottom=350),
baseline=Baseline(slope=0.0, intercept=0),
children=[word],
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=300, right=900, bottom=350),
direction="ltr",
language="eng",
children=[caption],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "caption.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
extracted_text = text_from_pdf(output_pdf)
assert "Caption" in extracted_text
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_pdf_renderer.py",
"license": "Mozilla Public License 2.0",
"lines": 501,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_pipeline_generate_ocr.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for pipeline support of generate_ocr().
These tests verify that the pipeline supports the new generate_ocr() API
alongside the existing hOCR path.
"""
from __future__ import annotations
import dataclasses
from pathlib import Path
from unittest.mock import MagicMock, patch
from ocrmypdf import BoundingBox, OcrElement
class TestOcrEngineDirect:
"""Test the ocr_engine_direct() pipeline function."""
def test_ocr_engine_direct_function_exists(self):
"""ocr_engine_direct function should exist in _pipeline module."""
from ocrmypdf import _pipeline
assert hasattr(_pipeline, 'ocr_engine_direct')
def test_ocr_engine_direct_returns_tuple(self, tmp_path):
"""ocr_engine_direct should return (OcrElement, Path) tuple."""
from ocrmypdf._pipeline import ocr_engine_direct
# Mock page context with an engine that supports generate_ocr
mock_context = MagicMock()
mock_engine = MagicMock()
mock_engine.supports_generate_ocr.return_value = True
mock_engine.generate_ocr.return_value = (
OcrElement(ocr_class='ocr_page', bbox=BoundingBox(0, 0, 100, 100)),
"test text",
)
mock_context.plugin_manager.get_ocr_engine.return_value = mock_engine
mock_context.get_path.return_value = tmp_path / Path("test.txt")
mock_context.pageno = 0
with patch('builtins.open', MagicMock()):
result = ocr_engine_direct(Path("test.png"), mock_context)
assert isinstance(result, tuple)
assert len(result) == 2
class TestPageResultExtension:
"""Test PageResult NamedTuple extension."""
def test_page_result_has_ocr_tree_field(self):
"""PageResult should have ocr_tree field."""
from ocrmypdf._pipelines._common import PageResult
# PageResult is a NamedTuple, use _fields
assert 'ocr_tree' in PageResult._fields
def test_page_result_ocr_tree_default_none(self):
"""PageResult.ocr_tree should default to None."""
from ocrmypdf._pipelines._common import PageResult
result = PageResult(pageno=0)
assert result.ocr_tree is None
class TestFpdf2DirectPage:
"""Test Fpdf2DirectPage dataclass for direct OcrElement input."""
def test_fpdf2_direct_page_exists(self):
"""Fpdf2DirectPage dataclass should exist."""
from ocrmypdf._graft import Fpdf2DirectPage
assert Fpdf2DirectPage is not None
def test_fpdf2_direct_page_has_ocr_tree(self):
"""Fpdf2DirectPage should have ocr_tree field."""
from ocrmypdf._graft import Fpdf2DirectPage
fields = {f.name for f in dataclasses.fields(Fpdf2DirectPage)}
assert 'ocr_tree' in fields
class TestHOCRResultExtension:
"""Test HOCRResult dataclass extension."""
def test_hocr_result_has_ocr_tree_field(self):
"""HOCRResult should have ocr_tree field."""
from ocrmypdf._pipelines._common import HOCRResult
fields = {f.name for f in dataclasses.fields(HOCRResult)}
assert 'ocr_tree' in fields
def test_hocr_result_ocr_tree_default_none(self):
"""HOCRResult.ocr_tree should default to None."""
from ocrmypdf._pipelines._common import HOCRResult
result = HOCRResult(pageno=0)
assert result.ocr_tree is None
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_pipeline_generate_ocr.py",
"license": "Mozilla Public License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_rasterizer.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Tests for the --rasterizer CLI option."""
from __future__ import annotations
from io import BytesIO
import img2pdf
import pikepdf
import pytest
from PIL import Image
from ocrmypdf._options import OcrOptions
from ocrmypdf._plugin_manager import get_plugin_manager
from ocrmypdf.helpers import IMG2PDF_KWARGS, Resolution
from .conftest import check_ocrmypdf
# Check if pypdfium2 is available
try:
import pypdfium2 # noqa: F401
PYPDFIUM_AVAILABLE = True
except ImportError:
PYPDFIUM_AVAILABLE = False
class TestRasterizerOption:
"""Test the --rasterizer CLI option."""
def test_rasterizer_auto_default(self, resources, outpdf):
"""Test that --rasterizer auto (default) works."""
check_ocrmypdf(
resources / 'graph.pdf',
outpdf,
'--rasterizer',
'auto',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
def test_rasterizer_ghostscript(self, resources, outpdf):
"""Test that --rasterizer ghostscript works."""
check_ocrmypdf(
resources / 'graph.pdf',
outpdf,
'--rasterizer',
'ghostscript',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
@pytest.mark.skipif(not PYPDFIUM_AVAILABLE, reason="pypdfium2 not installed")
def test_rasterizer_pypdfium(self, resources, outpdf):
"""Test that --rasterizer pypdfium works when pypdfium2 is installed."""
check_ocrmypdf(
resources / 'graph.pdf',
outpdf,
'--rasterizer',
'pypdfium',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
def test_rasterizer_invalid(self):
"""Test that an invalid rasterizer value is rejected."""
with pytest.raises(ValueError, match="rasterizer must be one of"):
OcrOptions(
input_file='test.pdf', output_file='out.pdf', rasterizer='invalid'
)
class TestRasterizerWithRotation:
"""Test --rasterizer interaction with --rotate-pages."""
def test_ghostscript_with_rotation(self, resources, outpdf):
"""Test Ghostscript rasterizer with page rotation."""
check_ocrmypdf(
resources / 'cardinal.pdf',
outpdf,
'--rasterizer',
'ghostscript',
'--rotate-pages',
'--rotate-pages-threshold',
'0.1',
'--plugin',
'tests/plugins/tesseract_cache.py',
)
@pytest.mark.skipif(not PYPDFIUM_AVAILABLE, reason="pypdfium2 not installed")
def test_pypdfium_with_rotation(self, resources, outpdf):
"""Test pypdfium rasterizer with page rotation."""
check_ocrmypdf(
resources / 'cardinal.pdf',
outpdf,
'--rasterizer',
'pypdfium',
'--rotate-pages',
'--rotate-pages-threshold',
'0.1',
'--plugin',
'tests/plugins/tesseract_cache.py',
)
def test_auto_with_rotation(self, resources, outpdf):
"""Test auto rasterizer with page rotation."""
check_ocrmypdf(
resources / 'cardinal.pdf',
outpdf,
'--rasterizer',
'auto',
'--rotate-pages',
'--rotate-pages-threshold',
'0.1',
'--plugin',
'tests/plugins/tesseract_cache.py',
)
class TestRasterizerHookDirect:
"""Test rasterize_pdf_page hook directly with different rasterizer options."""
def test_ghostscript_hook_respects_option(self, resources, tmp_path):
"""Test that Ghostscript hook returns None when pypdfium is requested."""
pm = get_plugin_manager([])
# Create options requesting pypdfium
options = OcrOptions(
input_file=resources / 'graph.pdf',
output_file=tmp_path / 'out.pdf',
rasterizer='pypdfium',
)
img = tmp_path / 'ghostscript_test.png'
result = pm.rasterize_pdf_page(
input_file=resources / 'graph.pdf',
output_file=img,
raster_device='pngmono',
raster_dpi=Resolution(50, 50),
page_dpi=Resolution(50, 50),
pageno=1,
rotation=0,
filter_vector=False,
stop_on_soft_error=True,
options=options,
use_cropbox=False,
)
# When pypdfium is requested:
# - If pypdfium IS available, pypdfium handles it and returns the path
# - If pypdfium is NOT available, both plugins return None
# (ghostscript returns None because pypdfium was requested,
# pypdfium returns None because it's not installed)
if PYPDFIUM_AVAILABLE:
assert result == img
else:
assert result is None
def test_pypdfium_hook_respects_option(self, resources, tmp_path):
"""Test that pypdfium hook returns None when ghostscript is requested."""
pm = get_plugin_manager([])
# Create options requesting ghostscript
options = OcrOptions(
input_file=resources / 'graph.pdf',
output_file=tmp_path / 'out.pdf',
rasterizer='ghostscript',
)
img = tmp_path / 'pypdfium_test.png'
result = pm.rasterize_pdf_page(
input_file=resources / 'graph.pdf',
output_file=img,
raster_device='pngmono',
raster_dpi=Resolution(50, 50),
page_dpi=Resolution(50, 50),
pageno=1,
rotation=0,
filter_vector=False,
stop_on_soft_error=True,
options=options,
use_cropbox=False,
)
# Ghostscript should handle it
assert result == img
assert img.exists()
def test_auto_uses_pypdfium_when_available(self, resources, tmp_path):
"""Test that auto mode uses pypdfium when available."""
pm = get_plugin_manager([])
options = OcrOptions(
input_file=resources / 'graph.pdf',
output_file=tmp_path / 'out.pdf',
rasterizer='auto',
)
img = tmp_path / 'auto_test.png'
result = pm.rasterize_pdf_page(
input_file=resources / 'graph.pdf',
output_file=img,
raster_device='pngmono',
raster_dpi=Resolution(50, 50),
page_dpi=Resolution(50, 50),
pageno=1,
rotation=0,
filter_vector=False,
stop_on_soft_error=True,
options=options,
use_cropbox=False,
)
assert result == img
assert img.exists()
def _create_gradient_image(width: int, height: int) -> Image.Image:
"""Create an image with multiple gradients to detect rasterization errors.
The image contains:
- Horizontal gradient from red to blue
- Vertical gradient overlay from green to transparent
- Diagonal bands for edge detection
"""
img = Image.new('RGB', (width, height))
pixels = img.load()
for y in range(height):
for x in range(width):
# Horizontal gradient: red to blue
r = int(255 * (1 - x / width))
b = int(255 * (x / width))
# Vertical gradient: add green component
g = int(255 * (y / height))
# Add diagonal bands for edge detection
band = ((x + y) // 20) % 2
if band:
r = min(255, r + 40)
g = min(255, g + 40)
b = min(255, b + 40)
pixels[x, y] = (r, g, b)
return img
@pytest.fixture
def pdf_with_nonstandard_boxes(tmp_path):
"""Create a PDF with nonstandard MediaBox, TrimBox and CropBox."""
# Create an image with gradients to detect rasterization errors
img = _create_gradient_image(200, 300)
img_bytes = BytesIO()
img.save(img_bytes, format='PNG')
img_bytes.seek(0)
# Convert to PDF
pdf_bytes = BytesIO()
img2pdf.convert(
img_bytes.read(),
layout_fun=img2pdf.get_fixed_dpi_layout_fun((72, 72)),
outputstream=pdf_bytes,
**IMG2PDF_KWARGS,
)
pdf_bytes.seek(0)
# Modify the PDF to have nonstandard boxes
pdf_path = tmp_path / 'nonstandard_boxes.pdf'
with pikepdf.open(pdf_bytes) as pdf:
page = pdf.pages[0]
# Set MediaBox larger than content
page.MediaBox = pikepdf.Array([0, 0, 400, 500])
# Set CropBox smaller - this is what viewers typically show
page.CropBox = pikepdf.Array([50, 50, 350, 450])
# Set TrimBox even smaller - indicates intended trim area
page.TrimBox = pikepdf.Array([75, 75, 325, 425])
pdf.save(pdf_path)
return pdf_path
@pytest.fixture
def pdf_with_negative_mediabox(tmp_path):
"""Create a PDF with MediaBox that has negative origin coordinates."""
# Create an image with gradients to detect rasterization errors
img = _create_gradient_image(200, 300)
img_bytes = BytesIO()
img.save(img_bytes, format='PNG')
img_bytes.seek(0)
pdf_bytes = BytesIO()
img2pdf.convert(
img_bytes.read(),
layout_fun=img2pdf.get_fixed_dpi_layout_fun((72, 72)),
outputstream=pdf_bytes,
**IMG2PDF_KWARGS,
)
pdf_bytes.seek(0)
pdf_path = tmp_path / 'negative_mediabox.pdf'
with pikepdf.open(pdf_bytes) as pdf:
page = pdf.pages[0]
# MediaBox with negative origin (valid PDF but unusual)
page.MediaBox = pikepdf.Array([-100, -100, 300, 400])
pdf.save(pdf_path)
return pdf_path
class TestRasterizerWithNonStandardBoxes:
"""Test rasterizers with PDFs having nonstandard MediaBox/TrimBox/CropBox."""
def test_ghostscript_nonstandard_boxes(self, pdf_with_nonstandard_boxes, outpdf):
"""Test Ghostscript handles nonstandard page boxes correctly."""
check_ocrmypdf(
pdf_with_nonstandard_boxes,
outpdf,
'--rasterizer',
'ghostscript',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
@pytest.mark.skipif(not PYPDFIUM_AVAILABLE, reason="pypdfium2 not installed")
def test_pypdfium_nonstandard_boxes(self, pdf_with_nonstandard_boxes, outpdf):
"""Test pypdfium handles nonstandard page boxes correctly."""
check_ocrmypdf(
pdf_with_nonstandard_boxes,
outpdf,
'--rasterizer',
'pypdfium',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
def test_ghostscript_negative_mediabox(self, pdf_with_negative_mediabox, outpdf):
"""Test Ghostscript handles negative MediaBox origin."""
check_ocrmypdf(
pdf_with_negative_mediabox,
outpdf,
'--rasterizer',
'ghostscript',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
@pytest.mark.skipif(not PYPDFIUM_AVAILABLE, reason="pypdfium2 not installed")
def test_pypdfium_negative_mediabox(self, pdf_with_negative_mediabox, outpdf):
"""Test pypdfium handles negative MediaBox origin."""
check_ocrmypdf(
pdf_with_negative_mediabox,
outpdf,
'--rasterizer',
'pypdfium',
'--plugin',
'tests/plugins/tesseract_noop.py',
)
def test_compare_rasterizers_nonstandard_boxes(
self, pdf_with_nonstandard_boxes, tmp_path
):
"""Compare output dimensions between rasterizers for nonstandard boxes."""
pm = get_plugin_manager([])
options_gs = OcrOptions(
input_file=pdf_with_nonstandard_boxes,
output_file=tmp_path / 'out_gs.pdf',
rasterizer='ghostscript',
)
img_gs = tmp_path / 'gs.png'
pm.rasterize_pdf_page(
input_file=pdf_with_nonstandard_boxes,
output_file=img_gs,
raster_device='png16m',
raster_dpi=Resolution(72, 72),
page_dpi=Resolution(72, 72),
pageno=1,
rotation=0,
filter_vector=False,
stop_on_soft_error=True,
options=options_gs,
use_cropbox=False,
)
with Image.open(img_gs) as im_gs:
gs_size = im_gs.size
if PYPDFIUM_AVAILABLE:
options_pdfium = OcrOptions(
input_file=pdf_with_nonstandard_boxes,
output_file=tmp_path / 'out_pdfium.pdf',
rasterizer='pypdfium',
)
img_pdfium = tmp_path / 'pdfium.png'
pm.rasterize_pdf_page(
input_file=pdf_with_nonstandard_boxes,
output_file=img_pdfium,
raster_device='png16m',
raster_dpi=Resolution(72, 72),
page_dpi=Resolution(72, 72),
pageno=1,
rotation=0,
filter_vector=False,
stop_on_soft_error=True,
options=options_pdfium,
use_cropbox=False,
)
with Image.open(img_pdfium) as im_pdfium:
pdfium_size = im_pdfium.size
# Both rasterizers should now produce MediaBox dimensions (400x500)
# when use_cropbox=False (the default)
assert gs_size == (400, 500), f"Ghostscript size: {gs_size}"
assert pdfium_size == (400, 500), f"pypdfium size: {pdfium_size}"
class TestRasterizerWithRotationAndBoxes:
"""Test rasterizer + rotation + nonstandard boxes combinations."""
# The pdf_with_nonstandard_boxes fixture creates a PDF with:
# - MediaBox: [0, 0, 400, 500] → 400x500 points
# - CropBox: [50, 50, 350, 450] → 300x400 points
# - TrimBox: [75, 75, 325, 425] → 250x350 points
#
# With use_cropbox=False (default), both rasterizers use MediaBox
MEDIABOX_WIDTH = 400
MEDIABOX_HEIGHT = 500
def _get_expected_size(self, rotation: int) -> tuple[int, int]:
"""Get expected image dimensions after rotation."""
width, height = self.MEDIABOX_WIDTH, self.MEDIABOX_HEIGHT
if rotation in (0, 180):
return (width, height)
else: # 90, 270
return (height, width)
def test_ghostscript_rotation_dimensions(
self, pdf_with_nonstandard_boxes, tmp_path
):
"""Test Ghostscript produces correct dimensions with rotation."""
pm = get_plugin_manager([])
options = OcrOptions(
input_file=pdf_with_nonstandard_boxes,
output_file=tmp_path / 'out.pdf',
rasterizer='ghostscript',
)
for rotation in [0, 90, 180, 270]:
img_path = tmp_path / f'gs_rot{rotation}.png'
pm.rasterize_pdf_page(
input_file=pdf_with_nonstandard_boxes,
output_file=img_path,
raster_device='png16m',
raster_dpi=Resolution(72, 72),
page_dpi=Resolution(72, 72),
pageno=1,
rotation=rotation,
filter_vector=False,
stop_on_soft_error=True,
options=options,
use_cropbox=False,
)
assert img_path.exists(), f"Failed to rasterize with rotation {rotation}"
with Image.open(img_path) as img:
expected = self._get_expected_size(rotation)
# Allow small tolerance for rounding
assert abs(img.size[0] - expected[0]) <= 2, (
f"Width mismatch at {rotation}°: got {img.size[0]}, "
f"expected {expected[0]}"
)
assert abs(img.size[1] - expected[1]) <= 2, (
f"Height mismatch at {rotation}°: got {img.size[1]}, "
f"expected {expected[1]}"
)
@pytest.mark.skipif(not PYPDFIUM_AVAILABLE, reason="pypdfium2 not installed")
def test_pypdfium_rotation_dimensions(self, pdf_with_nonstandard_boxes, tmp_path):
"""Test pypdfium produces correct dimensions with rotation."""
pm = get_plugin_manager([])
options = OcrOptions(
input_file=pdf_with_nonstandard_boxes,
output_file=tmp_path / 'out.pdf',
rasterizer='pypdfium',
)
for rotation in [0, 90, 180, 270]:
img_path = tmp_path / f'pdfium_rot{rotation}.png'
pm.rasterize_pdf_page(
input_file=pdf_with_nonstandard_boxes,
output_file=img_path,
raster_device='png16m',
raster_dpi=Resolution(72, 72),
page_dpi=Resolution(72, 72),
pageno=1,
rotation=rotation,
filter_vector=False,
stop_on_soft_error=True,
options=options,
use_cropbox=False,
)
assert img_path.exists(), f"Failed to rasterize with rotation {rotation}"
with Image.open(img_path) as img:
expected = self._get_expected_size(rotation)
# Allow small tolerance for rounding
assert abs(img.size[0] - expected[0]) <= 2, (
f"Width mismatch at {rotation}°: got {img.size[0]}, "
f"expected {expected[0]}"
)
assert abs(img.size[1] - expected[1]) <= 2, (
f"Height mismatch at {rotation}°: got {img.size[1]}, "
f"expected {expected[1]}"
)
@pytest.mark.skipif(not PYPDFIUM_AVAILABLE, reason="pypdfium2 not installed")
def test_rasterizers_produce_same_dimensions(
self, pdf_with_nonstandard_boxes, tmp_path
):
"""Verify ghostscript and pypdfium produce the same MediaBox dimensions.
With use_cropbox=False (the default), both rasterizers should render
to the MediaBox and produce identical dimensions.
"""
pm = get_plugin_manager([])
for rotation in [0, 90, 180, 270]:
# Rasterize with Ghostscript
gs_options = OcrOptions(
input_file=pdf_with_nonstandard_boxes,
output_file=tmp_path / 'out.pdf',
rasterizer='ghostscript',
)
gs_img_path = tmp_path / f'gs_cmp_rot{rotation}.png'
pm.rasterize_pdf_page(
input_file=pdf_with_nonstandard_boxes,
output_file=gs_img_path,
raster_device='png16m',
raster_dpi=Resolution(72, 72),
page_dpi=Resolution(72, 72),
pageno=1,
rotation=rotation,
filter_vector=False,
stop_on_soft_error=True,
options=gs_options,
use_cropbox=False,
)
# Rasterize with pypdfium
pdfium_options = OcrOptions(
input_file=pdf_with_nonstandard_boxes,
output_file=tmp_path / 'out.pdf',
rasterizer='pypdfium',
)
pdfium_img_path = tmp_path / f'pdfium_cmp_rot{rotation}.png'
pm.rasterize_pdf_page(
input_file=pdf_with_nonstandard_boxes,
output_file=pdfium_img_path,
raster_device='png16m',
raster_dpi=Resolution(72, 72),
page_dpi=Resolution(72, 72),
pageno=1,
rotation=rotation,
filter_vector=False,
stop_on_soft_error=True,
options=pdfium_options,
use_cropbox=False,
)
# Verify both produce the same MediaBox dimensions
with (
Image.open(gs_img_path) as gs_img,
Image.open(pdfium_img_path) as pdfium_img,
):
expected = self._get_expected_size(rotation)
assert abs(gs_img.size[0] - expected[0]) <= 2, (
f"GS width at {rotation}°: {gs_img.size[0]}, "
f"expected {expected[0]}"
)
assert abs(gs_img.size[1] - expected[1]) <= 2, (
f"GS height at {rotation}°: {gs_img.size[1]}, "
f"expected {expected[1]}"
)
assert abs(pdfium_img.size[0] - expected[0]) <= 2, (
f"pdfium width at {rotation}°: {pdfium_img.size[0]}, "
f"expected {expected[0]}"
)
assert abs(pdfium_img.size[1] - expected[1]) <= 2, (
f"pdfium height at {rotation}°: {pdfium_img.size[1]}, "
f"expected {expected[1]}"
)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_rasterizer.py",
"license": "Mozilla Public License 2.0",
"lines": 518,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_system_font_provider.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for SystemFontProvider and ChainedFontProvider."""
from __future__ import annotations
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from ocrmypdf.font import (
BuiltinFontProvider,
ChainedFontProvider,
SystemFontProvider,
)
# --- SystemFontProvider Platform Detection Tests ---
class TestSystemFontProviderPlatform:
"""Test platform detection in SystemFontProvider."""
def test_get_platform_linux(self):
"""Test Linux platform detection."""
provider = SystemFontProvider()
with patch.object(sys, 'platform', 'linux'):
assert provider._get_platform() == 'linux'
def test_get_platform_darwin(self):
"""Test macOS platform detection."""
provider = SystemFontProvider()
with patch.object(sys, 'platform', 'darwin'):
assert provider._get_platform() == 'darwin'
def test_get_platform_windows(self):
"""Test Windows platform detection."""
provider = SystemFontProvider()
with patch.object(sys, 'platform', 'win32'):
assert provider._get_platform() == 'windows'
def test_get_platform_freebsd(self):
"""Test FreeBSD platform detection."""
provider = SystemFontProvider()
with patch.object(sys, 'platform', 'freebsd13'):
assert provider._get_platform() == 'freebsd'
class TestSystemFontProviderDirectories:
"""Test font directory resolution."""
def test_linux_font_dirs(self):
"""Test Linux font directories."""
provider = SystemFontProvider()
with patch.object(sys, 'platform', 'linux'):
provider._font_dirs = None # Reset cache
dirs = provider._get_font_dirs()
assert Path('/usr/share/fonts') in dirs
assert Path('/usr/local/share/fonts') in dirs
def test_darwin_font_dirs(self):
"""Test macOS font directories."""
provider = SystemFontProvider()
with patch.object(sys, 'platform', 'darwin'):
provider._font_dirs = None # Reset cache
dirs = provider._get_font_dirs()
assert Path('/Library/Fonts') in dirs
assert Path('/System/Library/Fonts') in dirs
def test_windows_font_dirs_with_windir(self):
"""Test Windows font directory from WINDIR env var."""
provider = SystemFontProvider()
with (
patch.object(sys, 'platform', 'win32'),
patch.dict('os.environ', {'WINDIR': r'D:\Windows'}),
):
provider._font_dirs = None # Reset cache
dirs = provider._get_font_dirs()
# Check that Fonts subdir of WINDIR is included
# Use str comparison to avoid Path normalization issues across platforms
dir_strs = [str(d) for d in dirs]
assert any('Fonts' in d for d in dir_strs)
def test_windows_font_dirs_default(self):
"""Test Windows font directory with default path."""
provider = SystemFontProvider()
with (
patch.object(sys, 'platform', 'win32'),
patch.dict('os.environ', {}, clear=True),
):
provider._font_dirs = None # Reset cache
dirs = provider._get_font_dirs()
# Check that Windows\Fonts is included (default fallback)
dir_strs = [str(d) for d in dirs]
assert any('Windows' in d and 'Fonts' in d for d in dir_strs)
def test_windows_font_dirs_with_localappdata(self):
"""Test Windows user fonts directory from LOCALAPPDATA env var."""
provider = SystemFontProvider()
with (
patch.object(sys, 'platform', 'win32'),
patch.dict(
'os.environ',
{'WINDIR': r'C:\Windows', 'LOCALAPPDATA': r'C:\Users\Test\AppData\Local'},
),
):
provider._font_dirs = None # Reset cache
dirs = provider._get_font_dirs()
dir_strs = [str(d) for d in dirs]
# Should have both system and user font directories
assert len(dirs) == 2
assert any('Windows' in d and 'Fonts' in d for d in dir_strs)
assert any(
'AppData' in d and 'Local' in d and 'Fonts' in d
for d in dir_strs
)
def test_font_dirs_cached(self):
"""Test that font directories are cached."""
provider = SystemFontProvider()
dirs1 = provider._get_font_dirs()
dirs2 = provider._get_font_dirs()
assert dirs1 is dirs2 # Same object, not recomputed
class TestSystemFontProviderLazyLoading:
"""Test lazy loading behavior."""
def test_no_scanning_on_init(self):
"""Test that no directory scanning happens during initialization."""
provider = SystemFontProvider()
# Caches should be empty
assert len(provider._font_cache) == 0
assert len(provider._not_found) == 0
def test_get_font_unknown_name_returns_none(self):
"""Test that unknown font names return None."""
provider = SystemFontProvider()
result = provider.get_font('UnknownFont-Regular')
assert result is None
# Unknown fonts are added to not_found to cache the negative result
assert 'UnknownFont-Regular' in provider._not_found
def test_negative_cache(self):
"""Test that not-found results are cached."""
provider = SystemFontProvider()
# Mock _find_font_file to return None
with patch.object(provider, '_find_font_file', return_value=None):
result1 = provider.get_font('NotoSansCJK-Regular')
assert result1 is None
assert 'NotoSansCJK-Regular' in provider._not_found
# Second call should not call _find_font_file again
provider._find_font_file = MagicMock(return_value=None)
result2 = provider.get_font('NotoSansCJK-Regular')
assert result2 is None
provider._find_font_file.assert_not_called()
def test_positive_cache(self):
"""Test that found fonts are cached."""
provider = SystemFontProvider()
font_dir = Path(__file__).parent.parent / "src" / "ocrmypdf" / "data"
font_path = font_dir / "NotoSans-Regular.ttf"
if not font_path.exists():
pytest.skip("Test font not available")
with patch.object(provider, '_find_font_file', return_value=font_path):
result1 = provider.get_font('NotoSans-Regular')
assert result1 is not None
assert 'NotoSans-Regular' in provider._font_cache
# Second call should use cache
provider._find_font_file = MagicMock()
result2 = provider.get_font('NotoSans-Regular')
assert result2 is result1
provider._find_font_file.assert_not_called()
class TestSystemFontProviderAvailableFonts:
"""Test get_available_fonts method."""
def test_returns_all_patterns(self):
"""Test that get_available_fonts returns all known font patterns."""
provider = SystemFontProvider()
fonts = provider.get_available_fonts()
assert 'NotoSans-Regular' in fonts
assert 'NotoSansCJK-Regular' in fonts
assert 'NotoSansArabic-Regular' in fonts
assert 'NotoSansThai-Regular' in fonts
def test_fallback_font_raises(self):
"""Test that get_fallback_font raises NotImplementedError."""
provider = SystemFontProvider()
with pytest.raises(NotImplementedError):
provider.get_fallback_font()
# --- ChainedFontProvider Tests ---
class TestChainedFontProvider:
"""Test ChainedFontProvider."""
def test_requires_at_least_one_provider(self):
"""Test that empty provider list raises error."""
with pytest.raises(ValueError, match="At least one provider"):
ChainedFontProvider([])
def test_get_font_tries_providers_in_order(self):
"""Test that get_font tries providers in order."""
provider1 = MagicMock()
provider1.get_font.return_value = None
provider2 = MagicMock()
mock_font = MagicMock()
provider2.get_font.return_value = mock_font
chain = ChainedFontProvider([provider1, provider2])
result = chain.get_font('TestFont')
provider1.get_font.assert_called_once_with('TestFont')
provider2.get_font.assert_called_once_with('TestFont')
assert result is mock_font
def test_get_font_stops_on_first_match(self):
"""Test that get_font stops after first successful match."""
mock_font = MagicMock()
provider1 = MagicMock()
provider1.get_font.return_value = mock_font
provider2 = MagicMock()
chain = ChainedFontProvider([provider1, provider2])
result = chain.get_font('TestFont')
provider1.get_font.assert_called_once()
provider2.get_font.assert_not_called()
assert result is mock_font
def test_get_font_returns_none_if_all_fail(self):
"""Test that get_font returns None if all providers fail."""
provider1 = MagicMock()
provider1.get_font.return_value = None
provider2 = MagicMock()
provider2.get_font.return_value = None
chain = ChainedFontProvider([provider1, provider2])
result = chain.get_font('TestFont')
assert result is None
def test_get_available_fonts_combines_providers(self):
"""Test that get_available_fonts combines all providers."""
provider1 = MagicMock()
provider1.get_available_fonts.return_value = ['Font1', 'Font2']
provider2 = MagicMock()
provider2.get_available_fonts.return_value = ['Font2', 'Font3']
chain = ChainedFontProvider([provider1, provider2])
fonts = chain.get_available_fonts()
assert fonts == ['Font1', 'Font2', 'Font3'] # Deduplicated, order preserved
def test_get_fallback_font_from_first_provider(self):
"""Test that get_fallback_font uses first available fallback."""
mock_font = MagicMock()
provider1 = MagicMock()
provider1.get_fallback_font.return_value = mock_font
provider2 = MagicMock()
chain = ChainedFontProvider([provider1, provider2])
result = chain.get_fallback_font()
assert result is mock_font
provider2.get_fallback_font.assert_not_called()
def test_get_fallback_font_skips_not_implemented(self):
"""Test that get_fallback_font skips providers that raise."""
provider1 = MagicMock()
provider1.get_fallback_font.side_effect = NotImplementedError()
mock_font = MagicMock()
provider2 = MagicMock()
provider2.get_fallback_font.return_value = mock_font
chain = ChainedFontProvider([provider1, provider2])
result = chain.get_fallback_font()
assert result is mock_font
def test_get_fallback_font_raises_if_none_available(self):
"""Test that get_fallback_font raises if no provider has fallback."""
provider1 = MagicMock()
provider1.get_fallback_font.side_effect = NotImplementedError()
provider2 = MagicMock()
provider2.get_fallback_font.side_effect = KeyError()
chain = ChainedFontProvider([provider1, provider2])
with pytest.raises(RuntimeError, match="No fallback font available"):
chain.get_fallback_font()
class TestChainedFontProviderIntegration:
"""Integration tests with real providers."""
@pytest.fixture
def font_dir(self):
"""Return path to font directory."""
return Path(__file__).parent.parent / "src" / "ocrmypdf" / "data"
def test_builtin_then_system_chain(self, font_dir):
"""Test chaining BuiltinFontProvider with SystemFontProvider."""
builtin = BuiltinFontProvider(font_dir)
system = SystemFontProvider()
chain = ChainedFontProvider([builtin, system])
# Should find NotoSans from builtin
font = chain.get_font('NotoSans-Regular')
assert font is not None
# Should get fallback from builtin
fallback = chain.get_fallback_font()
assert fallback is not None
def test_system_fonts_extend_builtin(self, font_dir):
"""Test that system fonts add to builtin fonts."""
builtin = BuiltinFontProvider(font_dir)
system = SystemFontProvider()
chain = ChainedFontProvider([builtin, system])
builtin_fonts = set(builtin.get_available_fonts())
chain_fonts = set(chain.get_available_fonts())
# Chain should have at least as many fonts as builtin
assert chain_fonts >= builtin_fonts
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_system_font_provider.py",
"license": "Mozilla Public License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_verapdf.py | # SPDX-FileCopyrightText: 2024 James R. Barlow
# SPDX-License-Identifier: CC-BY-SA-4.0
"""Tests for verapdf wrapper and speculative PDF/A conversion."""
from __future__ import annotations
import pikepdf
import pytest
from pikepdf import Name
from ocrmypdf._exec import verapdf
from ocrmypdf.pdfa import (
_pdfa_part_conformance,
add_pdfa_metadata,
add_srgb_output_intent,
speculative_pdfa_conversion,
)
class TestVerapdfModule:
"""Tests for verapdf wrapper module."""
def test_output_type_to_flavour(self):
assert verapdf.output_type_to_flavour('pdfa') == '2b'
assert verapdf.output_type_to_flavour('pdfa-1') == '1b'
assert verapdf.output_type_to_flavour('pdfa-2') == '2b'
assert verapdf.output_type_to_flavour('pdfa-3') == '3b'
# Unknown should default to 2b
assert verapdf.output_type_to_flavour('unknown') == '2b'
@pytest.mark.skipif(not verapdf.available(), reason='verapdf not installed')
def test_version(self):
ver = verapdf.version()
assert ver.major >= 1
@pytest.mark.skipif(not verapdf.available(), reason='verapdf not installed')
def test_validate_non_pdfa(self, tmp_path):
"""Test validation of a non-PDF/A file returns invalid."""
test_pdf = tmp_path / 'test.pdf'
with pikepdf.new() as pdf:
pdf.add_blank_page()
pdf.save(test_pdf)
result = verapdf.validate(test_pdf, '2b')
assert not result.valid
assert result.failed_rules > 0
class TestPdfaPartConformance:
"""Tests for _pdfa_part_conformance helper."""
def test_pdfa_part_conformance(self):
assert _pdfa_part_conformance('pdfa') == ('2', 'B')
assert _pdfa_part_conformance('pdfa-1') == ('1', 'B')
assert _pdfa_part_conformance('pdfa-2') == ('2', 'B')
assert _pdfa_part_conformance('pdfa-3') == ('3', 'B')
# Unknown should default to 2B
assert _pdfa_part_conformance('unknown') == ('2', 'B')
class TestAddPdfaMetadata:
"""Tests for add_pdfa_metadata function."""
def test_add_pdfa_metadata(self, tmp_path):
"""Test adding PDF/A XMP metadata."""
test_pdf = tmp_path / 'test.pdf'
with pikepdf.new() as pdf:
pdf.add_blank_page()
pdf.save(test_pdf)
with pikepdf.open(test_pdf, allow_overwriting_input=True) as pdf:
add_pdfa_metadata(pdf, '2', 'B')
with pdf.open_metadata() as meta:
assert meta.pdfa_status == '2B'
pdf.save(test_pdf)
# Verify it persists after save
with pikepdf.open(test_pdf) as pdf, pdf.open_metadata() as meta:
assert meta.pdfa_status == '2B'
class TestAddSrgbOutputIntent:
"""Tests for add_srgb_output_intent function."""
def test_add_srgb_output_intent(self, tmp_path):
"""Test adding sRGB OutputIntent to a PDF."""
test_pdf = tmp_path / 'test.pdf'
with pikepdf.new() as pdf:
pdf.add_blank_page()
pdf.save(test_pdf)
with pikepdf.open(test_pdf, allow_overwriting_input=True) as pdf:
add_srgb_output_intent(pdf)
assert Name.OutputIntents in pdf.Root
assert len(pdf.Root.OutputIntents) == 1
intent = pdf.Root.OutputIntents[0]
assert str(intent.get(Name.OutputConditionIdentifier)) == 'sRGB'
pdf.save(test_pdf)
def test_add_srgb_output_intent_idempotent(self, tmp_path):
"""Test that adding OutputIntent twice doesn't duplicate."""
test_pdf = tmp_path / 'test.pdf'
with pikepdf.new() as pdf:
pdf.add_blank_page()
pdf.save(test_pdf)
with pikepdf.open(test_pdf, allow_overwriting_input=True) as pdf:
add_srgb_output_intent(pdf)
add_srgb_output_intent(pdf) # Second call should be a no-op
assert len(pdf.Root.OutputIntents) == 1
pdf.save(test_pdf)
class TestSpeculativePdfaConversion:
"""Tests for speculative PDF/A conversion."""
def test_speculative_conversion_creates_pdfa_structures(self, tmp_path, resources):
"""Test that speculative conversion adds PDF/A structures."""
input_pdf = resources / 'graph.pdf'
output_pdf = tmp_path / 'output.pdf'
result = speculative_pdfa_conversion(input_pdf, output_pdf, 'pdfa-2')
assert result.exists()
with pikepdf.open(result) as pdf:
assert Name.OutputIntents in pdf.Root
with pdf.open_metadata() as meta:
assert meta.pdfa_status == '2B'
def test_speculative_conversion_different_parts(self, tmp_path, resources):
"""Test speculative conversion with different PDF/A parts."""
input_pdf = resources / 'graph.pdf'
for output_type, expected_status in [
('pdfa-1', '1B'),
('pdfa-2', '2B'),
('pdfa-3', '3B'),
]:
output_pdf = tmp_path / f'output_{output_type}.pdf'
speculative_pdfa_conversion(input_pdf, output_pdf, output_type)
with pikepdf.open(output_pdf) as pdf, pdf.open_metadata() as meta:
assert meta.pdfa_status == expected_status
@pytest.mark.skipif(not verapdf.available(), reason='verapdf not installed')
class TestVerapdfIntegration:
"""Integration tests requiring verapdf."""
def test_speculative_conversion_validation(self, tmp_path, resources):
"""Test that speculative conversion can be validated by verapdf.
Note: Most test PDFs will fail validation because they have issues
that require Ghostscript to fix (fonts, colorspaces, etc.). This test
verifies the validation pipeline works, not that all PDFs pass.
"""
input_pdf = resources / 'graph.pdf'
output_pdf = tmp_path / 'output.pdf'
speculative_pdfa_conversion(input_pdf, output_pdf, 'pdfa-2')
# The converted file can be validated (even if it fails)
result = verapdf.validate(output_pdf, '2b')
assert isinstance(result.valid, bool)
assert isinstance(result.failed_rules, int)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_verapdf.py",
"license": "Mozilla Public License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_page_boxes.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
from __future__ import annotations
import pikepdf
import pytest
from ocrmypdf._exec import verapdf
from .conftest import check_ocrmypdf
page_rect = [0, 0, 612, 792]
inset_rect = [200, 200, 612, 792]
wh_rect = [0, 0, 412, 592]
neg_rect = [-100, -100, 512, 692]
# When speculative PDF/A succeeds (verapdf available), MediaBox is preserved.
# Ghostscript would normalize MediaBox to start at origin, but speculative
# conversion bypasses Ghostscript.
_pdfa_inset_expected = inset_rect if verapdf.available() else wh_rect
mediabox_testdata = [
('fpdf2', 'pdfa', 'ccitt.pdf', None, inset_rect, _pdfa_inset_expected),
('sandwich', 'pdfa', 'ccitt.pdf', None, inset_rect, _pdfa_inset_expected),
('fpdf2', 'pdf', 'ccitt.pdf', None, inset_rect, inset_rect),
('sandwich', 'pdf', 'ccitt.pdf', None, inset_rect, inset_rect),
(
'fpdf2',
'pdfa',
'ccitt.pdf',
'--force-ocr',
inset_rect,
wh_rect,
),
(
'fpdf2',
'pdf',
'ccitt.pdf',
'--force-ocr',
inset_rect,
wh_rect,
),
('fpdf2', 'pdfa', 'ccitt.pdf', '--force-ocr', neg_rect, page_rect),
('fpdf2', 'pdf', 'ccitt.pdf', '--force-ocr', neg_rect, page_rect),
]
@pytest.mark.parametrize(
'renderer, output_type, in_pdf, mode, crop_to, crop_expected', mediabox_testdata
)
def test_media_box(
resources, outdir, renderer, output_type, in_pdf, mode, crop_to, crop_expected
):
with pikepdf.open(resources / in_pdf) as pdf:
page = pdf.pages[0]
page.MediaBox = crop_to
pdf.save(outdir / 'cropped.pdf')
args = [
'--jobs',
'1',
'--pdf-renderer',
renderer,
'--output-type',
output_type,
]
if mode:
args.append(mode)
check_ocrmypdf(outdir / 'cropped.pdf', outdir / 'processed.pdf', *args)
with pikepdf.open(outdir / 'processed.pdf') as pdf:
page = pdf.pages[0]
assert [float(x) for x in page.mediabox] == crop_expected
cropbox_testdata = [
('fpdf2', 'pdfa', 'ccitt.pdf', None, inset_rect, inset_rect),
('sandwich', 'pdfa', 'ccitt.pdf', None, inset_rect, inset_rect),
('fpdf2', 'pdf', 'ccitt.pdf', None, inset_rect, inset_rect),
('sandwich', 'pdf', 'ccitt.pdf', None, inset_rect, inset_rect),
(
'fpdf2',
'pdfa',
'ccitt.pdf',
'--force-ocr',
inset_rect,
inset_rect,
),
(
'fpdf2',
'pdf',
'ccitt.pdf',
'--force-ocr',
inset_rect,
inset_rect,
),
]
@pytest.mark.parametrize(
'renderer, output_type, in_pdf, mode, crop_to, crop_expected', cropbox_testdata
)
def test_crop_box(
resources, outdir, renderer, output_type, in_pdf, mode, crop_to, crop_expected
):
with pikepdf.open(resources / in_pdf) as pdf:
page = pdf.pages[0]
page.CropBox = crop_to
pdf.save(outdir / 'cropped.pdf')
args = [
'--jobs',
'1',
'--pdf-renderer',
renderer,
'--output-type',
output_type,
'--optimize',
'0',
]
if mode:
args.append(mode)
check_ocrmypdf(outdir / 'cropped.pdf', outdir / 'processed.pdf', *args)
with pikepdf.open(outdir / 'processed.pdf') as pdf:
page = pdf.pages[0]
assert [float(x) for x in page.cropbox] == crop_expected
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_page_boxes.py",
"license": "Mozilla Public License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_watcher.py | from __future__ import annotations
import datetime as dt
import os
import shutil
import subprocess
import sys
import time
from pathlib import Path
import pytest
watchdog = pytest.importorskip('watchdog')
@pytest.mark.parametrize('year_month', [True, False])
def test_watcher(tmp_path, resources, year_month):
input_dir = tmp_path / 'input'
input_dir.mkdir()
output_dir = tmp_path / 'output'
output_dir.mkdir()
processed_dir = tmp_path / 'processed'
processed_dir.mkdir()
env_extra = {'OCR_OUTPUT_DIRECTORY_YEAR_MONTH': '1'} if year_month else {}
proc = subprocess.Popen(
[
sys.executable,
Path(__file__).parent.parent / 'misc' / 'watcher.py',
str(input_dir),
str(output_dir),
str(processed_dir),
],
cwd=str(tmp_path),
env=os.environ.copy() | env_extra,
)
time.sleep(5)
shutil.copy(resources / 'trivial.pdf', input_dir / 'trivial.pdf')
time.sleep(5)
if year_month:
assert (
output_dir
/ f'{dt.date.today().year}'
/ f'{dt.date.today().month:02d}'
/ 'trivial.pdf'
).exists()
else:
assert (output_dir / 'trivial.pdf').exists()
proc.terminate()
proc.wait()
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_watcher.py",
"license": "Mozilla Public License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:misc/_webservice.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: AGPL-3.0-or-later
"""This is a simple web service/HTTP wrapper for OCRmyPDF.
This may be more convenient than the command line tool for some Docker users.
Note that OCRmyPDF uses Ghostscript, which is licensed under AGPLv3+. While
OCRmyPDF is under GPLv3, this file is distributed under the Affero GPLv3+ license,
to emphasize that SaaS deployments should make sure they comply with
Ghostscript's license as well as OCRmyPDF's.
"""
from __future__ import annotations
import os
import subprocess
import sys
from functools import partial
from operator import getitem
from pathlib import Path
from tempfile import NamedTemporaryFile
import pikepdf
import streamlit as st
from ocrmypdf._defaults import DEFAULT_ROTATE_PAGES_THRESHOLD
def get_host_url_with_port(port: int) -> str:
    """Get the host URL for the web service. Hacky.

    Reads the incoming request's Host header, drops any Streamlit port,
    and substitutes *port*, keeping the page's protocol (scheme-relative).
    """
    raw_host = st.context.headers["host"]
    # partition() leaves the whole string as `host` when there is no colon,
    # matching the split/except-ValueError behavior.
    host, _sep, _streamlit_port = raw_host.partition(":")
    return f"//{host}:{port}"
# --- Streamlit page definition ----------------------------------------------
# NOTE: Streamlit scripts are order-sensitive; statement order below is the
# on-page widget order. Do not reorder.
st.title("OCRmyPDF Web Service")
uploaded = st.file_uploader("Upload input PDF or image", type=["pdf"], key="file")
mode = st.selectbox("Mode", options=["normal", "skip-text", "force-ocr", "redo-ocr"])
pages = st.text_input(
    "Pages", value="", help="Comma-separated list of pages to process"
)
with st.expander("Input options"):
    # NOTE(review): this checkbox value is never forwarded to the ocrmypdf
    # argument list below — confirm whether that is intentional.
    invalidate_digital_signatures = st.checkbox(
        "Invalidate digital signatures", value=False
    )
    language = st.selectbox("Language", options=["eng", "deu", "fra", "spa"])
    image_dpi = st.slider(
        "Image DPI", value=300, key="image_dpi", min_value=1, max_value=5000, step=50
    )
with st.expander("Preprocessing"):
    skip_big = st.checkbox("Skip OCR on big pages", value=False, key="skip_big")
    oversample = st.slider("Oversample", min_value=0, max_value=5000, value=0, step=50)
    rotate_pages = st.checkbox("Rotate pages", value=False, key="rotate")
    deskew = st.checkbox("Deskew pages", value=False, key="deskew")
    clean = st.checkbox("Clean pages before OCR", value=False, key="clean")
    clean_final = st.checkbox("Clean final", value=False, key="clean_final")
    remove_vectors = st.checkbox("Remove vectors", value=False, key="remove_vectors")
with st.expander("Output options"):
    output_type = st.selectbox(
        "Output type", options=["pdfa", "pdf", "pdfa-1", "pdfa-2", "pdfa-3", "none"]
    )
    pdf_renderer = st.selectbox(
        "PDF renderer", options=["auto", "hocr", "hocrdebug", "sandwich"]
    )
    optimize = st.selectbox("Optimize", options=["0", "1", "2", "3"])
    # NOTE(review): the return value of this selectbox is discarded, so the
    # PDF/A compression choice has no effect on the command line — confirm.
    st.selectbox("PDF/A compression", options=["auto", "jpeg", "lossless"])
with st.expander("Metadata"):
    title = author = keywords = subject = None
    if uploaded:
        # Pre-populate the metadata fields from the uploaded file's XMP.
        with pikepdf.open(uploaded) as pdf, pdf.open_metadata() as meta:
            st.code(str(meta), language="xml")
            title = st.text_input("Title", value=meta.get('dc:title', ''))
            author = st.text_input("Author", value=meta.get('dc:creator', ''))
            keywords = st.text_input("Keywords", value=meta.get('dc:subject', ''))
            subject = st.text_input("Subject", value=meta.get('dc:description', ''))
with st.expander("Optimization after OCR"):
    jpeg_quality = st.slider(
        "JPEG quality", min_value=0, max_value=100, value=75, key="jpeg_quality"
    )
    png_quality = st.slider(
        "PNG quality", min_value=0, max_value=100, value=75, key="png_quality"
    )
    jbig2_threshold = st.number_input(
        "JBIG2 threshold", value=0.85, key="jbig2_threshold"
    )
with st.expander("Advanced options"):
    jobs = st.slider(
        "Threads",
        min_value=1,
        max_value=os.cpu_count(),
        value=os.cpu_count(),
        key="threads",
    )
    max_image_mpixels = st.number_input(
        "Max image size",
        value=250.0,
        min_value=0.0,
        help="Maximum image size in megapixels",
    )
    rotate_pages_threshold = st.number_input(
        "Rotate pages threshold",
        value=DEFAULT_ROTATE_PAGES_THRESHOLD,
        min_value=0.0,
        max_value=1000.0,
        help="Threshold for automatic page rotation",
    )
    fast_web_view = st.number_input(
        "Fast web view",
        value=1.0,
        min_value=0.0,
        help="Linearize files above this size in MB",
    )
    continue_on_soft_render_error = st.checkbox(
        "Continue on soft render error", value=True
    )
    verbose_labels = ["quiet", "default", "debug", "debug_all"]
    verbose = st.selectbox(
        "Verbosity level",
        options=[-1, 0, 1, 2],
        index=1,
        # NOTE(review): getitem(verbose_labels, -1) labels the -1 (quiet)
        # option "debug_all" because of negative indexing; the intended
        # mapping looks like verbose_labels[v + 1] — confirm.
        format_func=partial(getitem, verbose_labels),
    )
if uploaded:
    # Translate the widget state into an ocrmypdf CLI argument list.
    # Falsy values (0, "", None, False) are skipped, so defaults are implied.
    args = []
    if mode and mode != 'normal':
        args.append(f"--{mode}")
    if language:
        args.append(f"--language={language}")
    if not uploaded.name.lower().endswith(".pdf") and image_dpi:
        args.append(f"--image-dpi={image_dpi}")
    if skip_big:
        args.append("--skip-big")
    if oversample:
        args.append(f"--oversample={oversample}")
    if rotate_pages:
        args.append("--rotate-pages")
    if deskew:
        args.append("--deskew")
    if clean:
        args.append("--clean")
    if clean_final:
        args.append("--clean-final")
    if remove_vectors:
        args.append("--remove-vectors")
    if output_type:
        args.append(f"--output-type={output_type}")
    if pdf_renderer:
        args.append(f"--pdf-renderer={pdf_renderer}")
    if optimize:
        args.append(f"--optimize={optimize}")
    if title:
        args.append(f"--title={title}")
    if author:
        args.append(f"--author={author}")
    if keywords:
        args.append(f"--keywords={keywords}")
    if subject:
        args.append(f"--subject={subject}")
    if pages:
        args.append(f"--pages={pages}")
    if max_image_mpixels:
        args.append(f"--max-image-mpixels={max_image_mpixels}")
    if rotate_pages_threshold:
        args.append(f"--rotate-pages-threshold={rotate_pages_threshold}")
    if fast_web_view:
        args.append(f"--fast-web-view={fast_web_view}")
    if continue_on_soft_render_error:
        args.append("--continue-on-soft-render-error")
    if verbose:
        args.append(f"--verbose={verbose}")
    # `optimize` is a string ("0".."3"); lexicographic comparison works here
    # only because the options are single digits.
    if optimize > '0' and jpeg_quality:
        args.append(f"--jpeg-quality={jpeg_quality}")
    if optimize > '0' and png_quality:
        args.append(f"--png-quality={png_quality}")
    if jbig2_threshold:
        args.append(f"--jbig2-threshold={jbig2_threshold}")
    if jobs:
        args.append(f"--jobs={jobs}")
    # Materialize the upload on disk so ocrmypdf can read it; keep an empty
    # temp file as the output target for the same reason.
    with NamedTemporaryFile(delete=True, suffix=f"_{uploaded.name}") as input_file:
        input_file.write(uploaded.getvalue())
        input_file.flush()
        input_file.seek(0)
        args.append(str(input_file.name))
        with NamedTemporaryFile(delete=True, suffix=".pdf") as output_file:
            args.append(str(output_file.name))
            # Track run state in the session so the button disables itself
            # while a conversion is in flight.
            st.session_state['running'] = (
                'run_button' in st.session_state and st.session_state.run_button
            )
            if st.button(
                "Run OCRmyPDF",
                disabled=st.session_state.get("running", False),
                key='run_button',
            ):
                st.session_state['running'] = True
                args = [sys.executable, '-u', '-m', "ocrmypdf"] + args
                proc = subprocess.Popen(
                    args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
                # Stream ocrmypdf's stderr into the page while it runs.
                with st.container(border=True):
                    while proc.poll() is None:
                        line = proc.stderr.readline()
                        if line:
                            st.html("<code>" + line.decode().strip() + "</code>")
                if proc.returncode != 0:
                    st.error(f"ocrmypdf failed with exit code {proc.returncode}")
                    st.session_state['running'] = False
                    st.stop()
                if Path(output_file.name).stat().st_size == 0:
                    st.error("No output PDF file was generated")
                    st.stop()
                st.download_button(
                    label="Download output PDF",
                    data=output_file.read(),
                    file_name=uploaded.name,
                    mime="application/pdf",
                )
                st.session_state['running'] = False
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "misc/_webservice.py",
"license": "Mozilla Public License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:misc/ocrmypdf_compare.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MIT
"""Run OCRmyPDF on the same PDF with different options."""
from __future__ import annotations
import os
import shlex
from io import BytesIO
from pathlib import Path
from subprocess import check_output, run
from tempfile import TemporaryDirectory
import pikepdf
import pymupdf
import streamlit as st
from lxml import etree
from streamlit_pdf_viewer import pdf_viewer
def do_column(label, suffix, d):
    """Render the CLI/env widgets for one comparison column.

    Returns ``(env, args)``: the process environment (os.environ overlaid
    with the user's entries) and the shlex-split command line with the
    ``{in_}``/``{out}`` placeholders substituted with paths inside *d*.
    """
    cli = st.text_area(
        f"Command line arguments for {label}",
        key=f"args{suffix}",
        value="ocrmypdf {in_} {out}",
    )
    env_text = st.text_area(f"Environment variables for {label}", key=f"env{suffix}")
    env = os.environ.copy()
    for entry in env_text.splitlines():
        if not entry:
            continue  # blank lines are allowed and skipped
        if "=" not in entry:
            st.error(f"Invalid environment variable: {entry}")
            break  # stop parsing at the first malformed entry
        name, _sep, value = entry.partition("=")
        env[name] = value
    input_path = os.path.join(d, "input.pdf")
    output_path = os.path.join(d, f"output{suffix}.pdf")
    args = shlex.split(cli.format(in_=input_path, out=output_path))
    with st.expander("Environment variables", expanded=bool(env_text.strip())):
        st.code('\n'.join(f"{k}={v}" for k, v in env.items()))
    st.code(shlex.join(args))
    return env, args
def main():
    """Streamlit page: run two OCRmyPDF configurations on one uploaded PDF
    and compare the resulting text and rendered pages side by side."""
    st.set_page_config(layout="wide")
    st.title("OCRmyPDF Compare")
    st.write("Run OCRmyPDF on the same PDF with different options.")
    st.warning("This is a testing tool and is not intended for production use.")
    uploaded_pdf = st.file_uploader("Upload a PDF", type=["pdf"])
    if uploaded_pdf is None:
        return
    pdf_bytes = uploaded_pdf.read()
    with pikepdf.open(BytesIO(pdf_bytes)) as p, TemporaryDirectory() as d:
        with st.expander("PDF Metadata"):
            # Pretty-print the XMP metadata as XML.
            with p.open_metadata() as meta:
                xml_txt = str(meta)
            parser = etree.XMLParser(remove_blank_text=True)
            tree = etree.fromstring(xml_txt, parser=parser)
            st.code(
                etree.tostring(tree, pretty_print=True).decode("utf-8"),
                language="xml",
            )
            st.write(p.docinfo)
            st.write("Number of pages:", len(p.pages))
        col1, col2 = st.columns(2)
        with col1:
            env1, args1 = do_column("A", "1", d)
        with col2:
            env2, args2 = do_column("B", "2", d)
        if not st.button("Execute and Compare"):
            return
        with st.spinner("Executing..."):
            Path(d, "input.pdf").write_bytes(pdf_bytes)
            # NOTE(review): exit status of the two runs is not checked; a
            # failed run surfaces later as a missing output file — confirm.
            run(args1, env=env1)
            run(args2, env=env2)
        # Show which Ghostscript each environment resolves to, since PATH
        # overrides are a common use of the env text boxes.
        col1, col2 = st.columns(2)
        with col1:
            st.text(
                "Ghostscript version A: "
                + check_output(
                    ["gs", "--version"],
                    env=env1,
                    text=True,
                )
            )
        with col2:
            st.text(
                "Ghostscript version B: "
                + check_output(
                    ["gs", "--version"],
                    env=env2,
                    text=True,
                )
            )
        doc1 = pymupdf.open(os.path.join(d, "output1.pdf"))
        doc2 = pymupdf.open(os.path.join(d, "output2.pdf"))
        # strict=False: walk pages pairwise, stopping at the shorter output.
        for i, page1_2 in enumerate(zip(doc1, doc2, strict=False)):
            st.write(f"Page {i+1}")
            page1, page2 = page1_2
            col1, col2 = st.columns(2)
            with col1, st.container(border=True):
                st.write(page1.get_text())
            with col2, st.container(border=True):
                st.write(page2.get_text())
        col1, col2 = st.columns(2)
        with col1, st.expander("PDF Viewer"):
            pdf_viewer(Path(d, "output1.pdf"))
        with col2, st.expander("PDF Viewer"):
            pdf_viewer(Path(d, "output2.pdf"))
if __name__ == "__main__":
main()
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "misc/ocrmypdf_compare.py",
"license": "Mozilla Public License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:misc/pdf_compare.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MIT
"""Compare two PDFs."""
from __future__ import annotations
import os
from io import BytesIO
from pathlib import Path
from tempfile import TemporaryDirectory
import pikepdf
import pymupdf
import streamlit as st
from lxml import etree
from streamlit_pdf_viewer import pdf_viewer
def do_metadata(pdf):
    """Render a PDF's XMP metadata (pretty-printed XML), docinfo, and page
    count into the current Streamlit container.

    *pdf* is anything pikepdf.open accepts (path or file-like object).
    """
    # Bind the opened document to a new name instead of shadowing the
    # parameter, for clarity; behavior is unchanged.
    with pikepdf.open(pdf) as doc:
        with doc.open_metadata() as meta:
            raw_xmp = str(meta)
        xml_parser = etree.XMLParser(remove_blank_text=True)
        pretty = etree.tostring(
            etree.fromstring(raw_xmp, parser=xml_parser), pretty_print=True
        ).decode("utf-8")
        st.code(pretty, language="xml")
        st.write(doc.docinfo)
        st.write("Number of pages:", len(doc.pages))
def main():
    """Streamlit page: upload two PDFs and compare their metadata, extracted
    text, and rendered pages side by side."""
    st.set_page_config(layout="wide")
    st.title("PDF Compare")
    st.write("Compare two PDFs.")
    col1, col2 = st.columns(2)
    with col1:
        uploaded_pdf1 = st.file_uploader("Upload a PDF", type=["pdf"], key='pdf1')
    with col2:
        uploaded_pdf2 = st.file_uploader("Upload a PDF", type=["pdf"], key='pdf2')
    if uploaded_pdf1 is None or uploaded_pdf2 is None:
        return
    pdf_bytes1 = uploaded_pdf1.getvalue()
    pdf_bytes2 = uploaded_pdf2.getvalue()
    with st.expander("PDF Metadata"):
        col1, col2 = st.columns(2)
        with col1:
            do_metadata(BytesIO(pdf_bytes1))
        with col2:
            do_metadata(BytesIO(pdf_bytes2))
    with TemporaryDirectory() as d:
        # pymupdf and pdf_viewer want real paths, so materialize both PDFs.
        Path(d, "1.pdf").write_bytes(pdf_bytes1)
        Path(d, "2.pdf").write_bytes(pdf_bytes2)
        with st.expander("Text"):
            doc1 = pymupdf.open(os.path.join(d, "1.pdf"))
            doc2 = pymupdf.open(os.path.join(d, "2.pdf"))
            # strict=False: stop at the shorter document's page count.
            for i, page1_2 in enumerate(zip(doc1, doc2, strict=False)):
                st.write(f"Page {i+1}")
                page1, page2 = page1_2
                col1, col2 = st.columns(2)
                with col1, st.container(border=True):
                    st.write(page1.get_text())
                with col2, st.container(border=True):
                    st.write(page2.get_text())
        with st.expander("PDF Viewer"):
            col1, col2 = st.columns(2)
            with col1:
                pdf_viewer(Path(d, "1.pdf"), key='pdf_viewer1', render_text=True)
            with col2:
                pdf_viewer(Path(d, "2.pdf"), key='pdf_viewer2', render_text=True)
if __name__ == "__main__":
main()
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "misc/pdf_compare.py",
"license": "Mozilla Public License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:misc/pdf_text_diff.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Compare text in PDFs."""
from __future__ import annotations
from pathlib import Path
from subprocess import run
from tempfile import NamedTemporaryFile
from typing import Annotated
import cyclopts
app = cyclopts.App()
@app.default
def main(
    pdf1: Annotated[Path, cyclopts.Parameter()],
    pdf2: Annotated[Path, cyclopts.Parameter()],
    *,
    engine: Annotated[str, cyclopts.Parameter()] = 'pdftotext',
):
    """Compare text in PDFs.

    Extracts the text of *pdf1* and *pdf2* with *engine* (a command that
    accepts pdftotext-style arguments), shows a colorized side-by-side diff
    in a pager, and returns 1 if the extracted text differs, else 0.
    """

    def extract(path: Path):
        # Bug fix: honor the ``engine`` option instead of hard-coding
        # 'pdftotext', which made the parameter a silent no-op.
        with open(path, 'rb') as f:
            return run(
                [engine, '-layout', '-', '-'],
                stdin=f,
                capture_output=True,
                check=True,
            )

    text1 = extract(pdf1)
    text2 = extract(pdf2)
    with NamedTemporaryFile() as t1, NamedTemporaryFile() as t2:
        t1.write(text1.stdout)
        t1.flush()
        t2.write(text2.stdout)
        t2.flush()
        # diff exits nonzero when the files differ; that is expected, so no
        # check=True here.
        diff = run(
            ['diff', '--color=always', '--side-by-side', t1.name, t2.name],
            capture_output=True,
        )
        run(['less', '-R'], input=diff.stdout, check=True)
    if text1.stdout.strip() != text2.stdout.strip():
        return 1
    return 0
if __name__ == '__main__':
app()
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "misc/pdf_text_diff.py",
"license": "Mozilla Public License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
onnx/onnx:onnx/backend/test/case/node/bitcast.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class BitCast(Base):
    """Node test cases for the BitCast operator.

    Each case builds a BitCast node targeting ``to_dtype``, feeds it ``x``,
    and expects the bit-identical reinterpretation ``x.view(view_dtype)``.
    """

    @staticmethod
    def _bitcast_case(x, to_dtype, view_dtype, name) -> None:
        # Shared driver for all cases below.
        node = onnx.helper.make_node(
            "BitCast",
            inputs=["x"],
            outputs=["y"],
            to=to_dtype,
        )
        expect(node, inputs=[x], outputs=[x.view(view_dtype)], name=name)

    @staticmethod
    def export_bitcast_float32_to_int32() -> None:
        """Test bitcasting from float32 to int32 (same size)."""
        BitCast._bitcast_case(
            np.array([1.0, -2.5, 3.75], dtype=np.float32),
            onnx.TensorProto.INT32,
            np.int32,
            "test_bitcast_float32_to_int32",
        )

    @staticmethod
    def export_bitcast_int32_to_float32() -> None:
        """Test bitcasting from int32 to float32 (same size)."""
        BitCast._bitcast_case(
            np.array([1065353216, -1071644672, 1081081856], dtype=np.int32),
            onnx.TensorProto.FLOAT,
            np.float32,
            "test_bitcast_int32_to_float32",
        )

    @staticmethod
    def export_bitcast_float64_to_int64() -> None:
        """Test bitcasting from float64 to int64 (same size)."""
        BitCast._bitcast_case(
            np.array([1.0, -2.5, 3.75], dtype=np.float64),
            onnx.TensorProto.INT64,
            np.int64,
            "test_bitcast_float64_to_int64",
        )

    @staticmethod
    def export_bitcast_int64_to_float64() -> None:
        """Test bitcasting from int64 to float64 (same size)."""
        BitCast._bitcast_case(
            np.array(
                [4607182418800017408, -4611686018427387904, 4614256656552045184],
                dtype=np.int64,
            ),
            onnx.TensorProto.DOUBLE,
            np.float64,
            "test_bitcast_int64_to_float64",
        )

    @staticmethod
    def export_bitcast_uint32_to_int32() -> None:
        """Test bitcasting from uint32 to int32 (same size, different signedness)."""
        BitCast._bitcast_case(
            np.array([4294967295, 2147483648, 2147483647], dtype=np.uint32),
            onnx.TensorProto.INT32,
            np.int32,
            "test_bitcast_uint32_to_int32",
        )

    @staticmethod
    def export_bitcast_2d_float32_to_int32() -> None:
        """Test bitcasting 2D array from float32 to int32."""
        BitCast._bitcast_case(
            np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32),
            onnx.TensorProto.INT32,
            np.int32,
            "test_bitcast_2d_float32_to_int32",
        )

    @staticmethod
    def export_bitcast_int8_to_uint8() -> None:
        """Test bitcasting from int8 to uint8 (same size, different signedness)."""
        BitCast._bitcast_case(
            np.array([-1, -128, 127, 0], dtype=np.int8),
            onnx.TensorProto.UINT8,
            np.uint8,
            "test_bitcast_int8_to_uint8",
        )

    @staticmethod
    def export_bitcast_scalar_float32_to_int32() -> None:
        """Test bitcasting scalar from float32 to int32."""
        BitCast._bitcast_case(
            np.array(1.0, dtype=np.float32),
            onnx.TensorProto.INT32,
            np.int32,
            "test_bitcast_scalar_float32_to_int32",
        )

    @staticmethod
    def export_bitcast_uint16_to_int16() -> None:
        """Test bitcasting from uint16 to int16 (same size, different signedness)."""
        BitCast._bitcast_case(
            np.array([1, 32768, 65535], dtype=np.uint16),
            onnx.TensorProto.INT16,
            np.int16,
            "test_bitcast_uint16_to_int16",
        )

    @staticmethod
    def export_bitcast_bool_to_uint8() -> None:
        """Test bitcasting from bool to uint8 (same size)."""
        BitCast._bitcast_case(
            np.array([True, False, True, False], dtype=np.bool_),
            onnx.TensorProto.UINT8,
            np.uint8,
            "test_bitcast_bool_to_uint8",
        )
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/backend/test/case/node/bitcast.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
onnx/onnx:onnx/reference/ops/op_bitcast.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.reference.op_run import OpRun
class BitCast(OpRun):
    """Reference implementation of BitCast: reinterpret tensor bits as
    another same-width dtype without converting values."""

    def _run(self, x, to: int):  # type: ignore
        # Bit reinterpretation is undefined for variable-width string
        # tensors, in either direction.
        if to == onnx.TensorProto.STRING:
            raise ValueError("BitCast to STRING is not supported")
        if x.dtype == np.str_:
            raise ValueError("BitCast from STRING is not supported")
        target_dtype = onnx.helper.tensor_dtype_to_np_dtype(to)
        src_bits = x.dtype.itemsize * 8
        dst_bits = np.dtype(target_dtype).itemsize * 8
        if src_bits != dst_bits:
            raise ValueError(
                f"BitCast requires input and output types to have the same "
                f"bit-width, but got {x.dtype} ({src_bits} bits) "
                f"and {target_dtype} ({dst_bits} bits)"
            )
        # view() reinterprets the underlying buffer without copying.
        return (x.view(target_dtype),)
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/reference/ops/op_bitcast.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
onnx/onnx:onnx/backend/test/case/node/cumprod.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class CumProd(Base):
    """Node test cases for the CumProd operator."""

    @staticmethod
    def _case(name, x, axis, y, **node_attrs) -> None:
        # Shared driver: build the node, wrap the axis as an int32 scalar
        # tensor, and check the expected cumulative product.
        node = onnx.helper.make_node(
            "CumProd", inputs=["x", "axis"], outputs=["y"], **node_attrs
        )
        expect(
            node, inputs=[x, np.array(axis, dtype=np.int32)], outputs=[y], name=name
        )

    @staticmethod
    def export_cumprod_1d() -> None:
        CumProd._case(
            "test_cumprod_1d",
            np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64),
            0,
            np.array([1.0, 2.0, 6.0, 24.0, 120.0], dtype=np.float64),
        )

    @staticmethod
    def export_cumprod_1d_exclusive() -> None:
        CumProd._case(
            "test_cumprod_1d_exclusive",
            np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64),
            0,
            np.array([1.0, 1.0, 2.0, 6.0, 24.0], dtype=np.float64),
            exclusive=1,
        )

    @staticmethod
    def export_cumprod_1d_reverse() -> None:
        CumProd._case(
            "test_cumprod_1d_reverse",
            np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64),
            0,
            np.array([120.0, 120.0, 60.0, 20.0, 5.0], dtype=np.float64),
            reverse=1,
        )

    @staticmethod
    def export_cumprod_1d_reverse_exclusive() -> None:
        CumProd._case(
            "test_cumprod_1d_reverse_exclusive",
            np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64),
            0,
            np.array([120.0, 60.0, 20.0, 5.0, 1.0], dtype=np.float64),
            reverse=1,
            exclusive=1,
        )

    @staticmethod
    def export_cumprod_2d_axis_0() -> None:
        CumProd._case(
            "test_cumprod_2d_axis_0",
            np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64),
            0,
            np.array([[1.0, 2.0, 3.0], [4.0, 10.0, 18.0]], dtype=np.float64),
        )

    @staticmethod
    def export_cumprod_2d_axis_1() -> None:
        CumProd._case(
            "test_cumprod_2d_axis_1",
            np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64),
            1,
            np.array([[1.0, 2.0, 6.0], [4.0, 20.0, 120.0]], dtype=np.float64),
        )

    @staticmethod
    def export_cumprod_2d_negative_axis() -> None:
        CumProd._case(
            "test_cumprod_2d_negative_axis",
            np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64),
            -1,
            np.array([[1.0, 2.0, 6.0], [4.0, 20.0, 120.0]], dtype=np.float64),
        )

    @staticmethod
    def export_cumprod_2d_int32() -> None:
        CumProd._case(
            "test_cumprod_2d_int32",
            np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32),
            0,
            np.array([[1, 2, 3], [4, 10, 18]], dtype=np.int32),
        )

    @staticmethod
    def export_cumprod_1d_int32_exclusive() -> None:
        CumProd._case(
            "test_cumprod_1d_int32_exclusive",
            np.array([1, 2, 3, 4, 5], dtype=np.int32),
            0,
            np.array([1, 1, 2, 6, 24], dtype=np.int32),
            exclusive=1,
        )
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/backend/test/case/node/cumprod.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
onnx/onnx:onnx/reference/ops/op_cum_prod.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
from onnx.reference.op_run import OpRun
class CumProd(OpRun):
    """Reference implementation of CumProd: cumulative product along one
    axis, with optional exclusive and reverse modes."""

    def _run(self, x, axis, exclusive=None, reverse=None):
        axis = np.asarray(axis)
        if axis.ndim != 0:
            raise ValueError(f"Axis must be a rank-0 tensor, got `{axis.ndim}`.")
        ax = int(axis)
        if reverse:
            # Reverse mode: flip, accumulate, flip back.
            x = np.flip(x, axis=ax)
        if exclusive:
            # Exclusive mode: position i holds the product of elements
            # strictly before i, so the first slot along the axis is 1 and
            # the cumprod of x[:-1] fills the remaining slots.
            res = np.ones_like(x)
            head = [slice(None)] * x.ndim
            tail = [slice(None)] * x.ndim
            head[ax] = slice(None, -1)
            tail[ax] = slice(1, None)
            np.cumprod(x[tuple(head)], axis=ax, out=res[tuple(tail)])
        else:
            res = np.cumprod(x, axis=ax, dtype=x.dtype)
        if reverse:
            res = np.flip(res, axis=ax)
        return (res,)
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/reference/ops/op_cum_prod.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
onnx/onnx:tools/spec_to_yaml.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
"""Output ONNX spec in YAML format.
Usage:
python spec_to_yaml.py --output onnx-spec/defs
"""
from __future__ import annotations
import argparse
import enum
import pathlib
from collections.abc import Iterable
from typing import Any
from ruamel.yaml import YAML
import onnx
def dump_onnx_object(
    onnx_obj: onnx.defs.OpSchema
    | onnx.defs.OpSchema.Attribute
    | onnx.defs.OpSchema.FormalParameter
    | onnx.defs.OpSchema.TypeConstraintParam,
) -> dict[str, Any]:
    """Serialize a schema-like ONNX object to a plain dict.

    Private attributes, enum classes, nanobind internals, and falsy
    converted values are omitted; attribute default values are unpacked
    via onnx.helper.get_attribute_value first.
    """
    dumped: dict[str, Any] = {}
    for attr_name in dir(onnx_obj):
        if attr_name.startswith("_"):
            continue
        attr_value = getattr(onnx_obj, attr_name)
        # Skip enum *classes* hanging off the object and nanobind-wrapped
        # internals that are not serializable data.
        if isinstance(attr_value, enum.EnumType) or "nanobind" in str(
            type(attr_value)
        ):
            continue
        if attr_name == "default_value" and isinstance(
            onnx_obj, onnx.defs.OpSchema.Attribute
        ):
            attr_value = onnx.helper.get_attribute_value(attr_value)
        converted = dump_value(attr_value)
        if converted:
            dumped[attr_name] = converted
    return dumped
def dump_enum(value: enum.Enum) -> str | None:
    """Return the member name for *value*, or None for a member literally
    named "Unknown" (treated as "no value" in the YAML output).

    Raises RuntimeError if *value* matches no member of its own enum type.
    """
    for candidate in type(value):
        if candidate != value:
            continue
        return None if candidate.name == "Unknown" else candidate.name
    raise RuntimeError(f"Unhandled type {type(value)}")
def dump_value(value: Any):  # noqa: PLR0911
    """Recursively convert a schema attribute value into YAML-friendly
    primitives (dicts, lists, scalars, or None).

    The isinstance chain below mirrors the original match statement; the
    order matters (e.g. str must be handled before the Iterable fallback).
    """
    if value is None:
        return None
    if isinstance(
        value,
        (
            onnx.defs.OpSchema,
            onnx.defs.OpSchema.Attribute,
            onnx.defs.OpSchema.FormalParameter,
            onnx.defs.OpSchema.TypeConstraintParam,
        ),
    ):
        return dump_onnx_object(value)
    if isinstance(value, onnx.FunctionProto):
        # Functions are rendered in the ONNX textual format.
        return onnx.printer.to_text(value)
    if isinstance(value, enum.Enum):
        return dump_enum(value)
    if isinstance(value, dict):
        return {k: dump_value(v) for k, v in value.items()}
    if isinstance(value, (float, int, str)):
        return value
    if isinstance(value, Iterable):
        # Rebuild the same container type with converted elements.
        return type(value)(dump_value(v) for v in value)  # type: ignore
    raise RuntimeError(f"Unhandled type {type(value)}")
def main():
    """Dump every registered ONNX op schema to per-domain YAML files.

    Layout: <output>/<domain>/latest/<Op>-<version>.yaml for the newest
    version of each op, and <output>/<domain>/old/... for superseded ones.
    """
    parser = argparse.ArgumentParser(description="Output ONNX spec in YAML format.")
    parser.add_argument("--output", help="Output directory", required=True)
    args = parser.parse_args()
    schemas = onnx.defs.get_all_schemas_with_history()
    yaml = YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)
    # First pass: highest since_version per op name, to tell "latest" from
    # superseded versions.
    latest_versions: dict = {}
    for schema in schemas:
        seen = latest_versions.get(schema.name)
        latest_versions[schema.name] = (
            schema.since_version if seen is None else max(seen, schema.since_version)
        )
    # Second pass: serialize each schema into its domain/version slot.
    for schema in schemas:
        schema_dict = dump_value(schema)
        domain = schema.domain or "ai.onnx"
        subdir = (
            "latest"
            if latest_versions[schema.name] == schema.since_version
            else "old"
        )
        outdir = pathlib.Path(args.output) / domain / subdir
        outdir.mkdir(parents=True, exist_ok=True)
        path = outdir / f"{schema.name}-{schema.since_version}.yaml"
        with open(path, "w", encoding="utf-8") as f:
            print(f"Writing {path}")
            yaml.dump(schema_dict, f)
if __name__ == "__main__":
main()
| {
"repo_id": "onnx/onnx",
"file_path": "tools/spec_to_yaml.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
onnx/onnx:onnx/test/test_env_python_executable.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import pathlib
import platform
import sys
import sysconfig
import tempfile
import unittest
from unittest.mock import patch
# Extract get_python_execute function from setup.py for testing
def get_python_execute() -> str:
    """Get the Python executable path for CMake configuration.

    Prefers sys.executable (the currently running interpreter); only when
    that path is missing or not executable does it fall back to scanning
    the install prefix's bin/ directory — the workaround for
    https://github.com/python/cpython/issues/84399. On Windows
    sys.executable is always trusted.
    """
    if os.name == "nt":
        return sys.executable
    candidate = sys.executable
    if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
        return candidate
    # Derive the install prefix from the C headers location:
    # <prefix>/include/pythonX.Y -> <prefix>
    prefix = os.path.abspath(os.path.join(sysconfig.get_path("include"), "..", ".."))
    if os.path.isdir(prefix):
        for name in ("python3", "python"):
            binary = os.path.join(prefix, "bin", name)
            if os.path.isfile(binary):
                return binary
    # Last resort: return sys.executable even though it looked invalid.
    return candidate
class TestGetPythonExecutable(unittest.TestCase):
"""Test suite for get_python_execute() function from setup.py."""
def test_windows_returns_sys_executable(self) -> None:
"""On Windows, get_python_execute() should always return sys.executable."""
with patch("os.name", "nt"):
result = get_python_execute()
self.assertEqual(result, sys.executable)
def test_valid_sys_executable_is_preferred(self) -> None:
"""When sys.executable is valid, it should be returned (non-Windows)."""
with patch("os.name", "posix"):
# sys.executable should be valid in most test environments
result = get_python_execute()
# Should return sys.executable since it's valid
self.assertEqual(result, sys.executable)
@unittest.skipIf(
sys.platform == "win32" and sys.version_info < (3, 11),
"On Windows this test requires Python >= 3.11 due to sysconfig/sys.abiflags behavior on older interpreters",
)
def test_invalid_sys_executable_falls_back(self) -> None:
"""When sys.executable is invalid, should fall back to directory search."""
with (
patch("os.name", "posix"),
patch("sys.executable", "/nonexistent/python"),
patch("os.path.isfile") as mock_isfile,
patch("os.access") as mock_access,
patch("os.path.isdir") as mock_isdir,
):
# Mock sys.executable as invalid
mock_isfile.return_value = False
mock_access.return_value = False
mock_isdir.return_value = False
result = get_python_execute()
# Should fall back to sys.executable as last resort
self.assertEqual(result, "/nonexistent/python")
def test_fallback_finds_python3_in_bin(self) -> None:
"""Test fallback finds python3 in bin directory when sys.executable is invalid."""
# Create a temporary directory structure for testing
with tempfile.TemporaryDirectory() as tmpdir:
mock_python_dir = pathlib.Path(tmpdir) / "python_install"
mock_bin_dir = mock_python_dir / "bin"
# Include path typically goes: <prefix>/include/pythonX.Y
# Going up two dirs (.. ..) gets us back to prefix
mock_include_dir = mock_python_dir / "include" / "python3.12"
mock_bin_dir.mkdir(parents=True)
mock_include_dir.mkdir(parents=True)
mock_python3 = mock_bin_dir / "python3"
# Create actual executable file
mock_python3.touch(mode=0o755)
with (
patch("os.name", "posix"),
patch("sys.executable", "/invalid/python"),
patch("sysconfig.get_path") as mock_get_path,
):
# Setup mocks - return the include/python3.12 path
mock_get_path.return_value = str(mock_include_dir)
result = get_python_execute()
self.assertEqual(result, str(mock_python3))
def test_fallback_finds_python_in_bin(self) -> None:
"""Test fallback finds python (not python3) when python3 doesn't exist."""
# Create a temporary directory structure for testing
with tempfile.TemporaryDirectory() as tmpdir:
mock_python_dir = pathlib.Path(tmpdir) / "python_install"
mock_bin_dir = mock_python_dir / "bin"
# Include path typically goes: <prefix>/include/pythonX.Y
# Going up two dirs (.. ..) gets us back to prefix
mock_include_dir = mock_python_dir / "include" / "python3.12"
mock_bin_dir.mkdir(parents=True)
mock_include_dir.mkdir(parents=True)
mock_python = mock_bin_dir / "python"
# Create actual executable file (python3 doesn't exist)
mock_python.touch(mode=0o755)
with (
patch("os.name", "posix"),
patch("sys.executable", "/invalid/python"),
patch("sysconfig.get_path") as mock_get_path,
):
# Setup mocks - return the include/python3.12 path
mock_get_path.return_value = str(mock_include_dir)
result = get_python_execute()
self.assertEqual(result, str(mock_python))
@unittest.skipIf(
sys.platform == "win32" and sys.version_info < (3, 11),
"On Windows this test requires Python >= 3.11 due to sysconfig/sys.abiflags behavior on older interpreters",
)
def test_executable_permission_check(self) -> None:
"""Test that executable permission is verified for sys.executable."""
with (
patch("os.name", "posix"),
patch("sys.executable", "/path/to/python"),
patch("os.path.isfile") as mock_isfile,
patch("os.access") as mock_access,
patch("os.path.isdir") as mock_isdir,
):
# File exists but is not executable
mock_isfile.return_value = True
mock_access.return_value = False
mock_isdir.return_value = False
# isdir=False defeats the include-dir fallback as well, so the
# function has nothing better to offer than sys.executable itself.
result = get_python_execute()
# Should fall back to sys.executable even though it's not executable
self.assertEqual(result, sys.executable)
def test_real_environment(self) -> None:
"""Test with real environment to ensure it works in practice."""
# Smoke test: no mocks — exercises the function against this interpreter.
result = get_python_execute()
# Result should be a non-empty string
self.assertIsInstance(result, str)
self.assertGreater(len(result), 0)
# On Windows, should be exactly sys.executable
if platform.system() == "Windows":
self.assertEqual(result, sys.executable)
# On non-Windows, in a normal environment, should return sys.executable
# since it should be valid
elif os.path.isfile(sys.executable) and os.access(sys.executable, os.X_OK):
self.assertEqual(result, sys.executable)
def test_virtual_environment_detection(self) -> None:
"""Test that virtual environment Python is correctly detected."""
with patch("os.name", "posix"):
result = get_python_execute()
# In a venv or virtualenv, sys.executable should point to the venv Python
# and the function should return it
if hasattr(sys, "prefix") and hasattr(sys, "base_prefix"):
# prefix != base_prefix is the standard venv detection idiom.
in_venv = sys.prefix != sys.base_prefix
if in_venv and os.path.isfile(sys.executable):
self.assertEqual(
result,
sys.executable,
"Virtual environment Python should be detected",
)
@unittest.skipIf(
platform.system() == "Windows",
"Fallback mechanism only applies to POSIX systems",
)
def test_cpython_issue_84399_fallback(self) -> None:
"""Test that the fallback handles cpython issue #84399 edge case.
This test is skipped on Windows because the Windows implementation
always returns sys.executable without any fallback logic.
The fallback mechanism is only relevant for POSIX systems.
"""
# This test verifies that the fallback mechanism is still present
# for the edge case mentioned in https://github.com/python/cpython/issues/84399
with (
patch("sys.executable", "/usr/bin/python-invalid"),
patch("os.path.isfile") as mock_isfile,
patch("os.access") as mock_access,
patch("sysconfig.get_path") as mock_get_path,
patch("os.path.isdir") as mock_isdir,
patch("os.path.abspath") as mock_abspath,
patch("os.path.join", side_effect=lambda *args: "/".join(args)),
):
# sys.executable points to invalid path
mock_isfile.return_value = False
mock_access.return_value = False
mock_get_path.return_value = "/usr/include/python3.12"
mock_isdir.return_value = True
mock_abspath.return_value = "/usr"
# Mock finding python3 in /usr/bin
def isfile_check(path: str) -> bool:
return path == "/usr/bin/python3"
# side_effect takes precedence over the return_value set above, so
# isfile reports False everywhere except /usr/bin/python3.
mock_isfile.side_effect = isfile_check
result = get_python_execute()
self.assertEqual(result, "/usr/bin/python3")
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/test/test_env_python_executable.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
onnx/onnx:onnx/test/node_shape_inference_test.py | # SPDX-License-Identifier: Apache-2.0
# Copyright (c) ONNX Project Contributors
from __future__ import annotations
import unittest
import parameterized
import onnx.helper
import onnx.shape_inference
class NodeInferenceTest(unittest.TestCase):
# Exercises single-node shape inference via infer_node_outputs,
# without constructing a full model.
@parameterized.parameterized.expand(
[
("GreaterOrEqual",),
("LessOrEqual",),
]
)
def test_comparison_op(self, op_type):
# Broadcasting a [1, 10] input with a [10, 1] input must produce a
# [10, 10] BOOL output for comparison ops.
node = onnx.helper.make_node(op_type, ["x", "y"], ["z"])
schema = onnx.defs.get_schema(node.op_type, 23, "")
xtype = onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT32, [1, 10])
ytype = onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT32, [10, 1])
result = onnx.shape_inference.infer_node_outputs(
schema, node, {"x": xtype, "y": ytype}
)
self.assertEqual(list(result.keys()), ["z"])
self.assertEqual(result["z"].tensor_type.elem_type, onnx.TensorProto.BOOL)
self.assertEqual(
[dim.dim_value for dim in result["z"].tensor_type.shape.dim],
[10, 10],
)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/test/node_shape_inference_test.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
onnx/onnx:onnx/reference/ops/op_tensor_scatter.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
from onnx.reference.op_run import OpRun
class TensorScatter(OpRun):
# Reference implementation: writes `update` into a copy of `past_cache`
# along `axis`, starting at each batch's offset from `write_indices`.
def _run(self, past_cache, update, write_indices=None, mode="linear", axis=-2):
if mode not in {"linear", "circular"}:
raise ValueError(f"Unsupported mode: {mode}")
if write_indices is None:
# Default: every batch starts writing at position 0.
write_indices = np.zeros((past_cache.shape[0],), dtype=np.int64)
input_shape = past_cache.shape
update_shape = update.shape
# Normalize a negative axis to a non-negative index.
axis = axis % len(input_shape)
for i in range(len(input_shape)):
if i != axis:
if input_shape[i] != update_shape[i]:
raise ValueError(
f"Input shape {input_shape} and update shape {update_shape} are not compatible in dimension {i}"
)
if i == axis:
if input_shape[i] < update_shape[i]:
raise ValueError(
f"Input shape {input_shape} and update shape {update_shape} are not compatible in axis dimension"
)
max_sequence_length = input_shape[axis]
sequence_length = update_shape[axis]
present_cache = np.copy(past_cache)
# Iterate over every index combination of the dims before `axis`.
# NOTE(review): prefix_idx[0] is used as the batch index, which assumes
# axis >= 1 and that dim 0 is the batch dim — confirm against the spec.
for prefix_idx in np.ndindex(input_shape[:axis]):
batch_idx = prefix_idx[0]
for sequence_idx in range(sequence_length):
cache_idx = (*prefix_idx, write_indices[batch_idx] + sequence_idx)
if mode == "circular":
# Circular mode wraps writes around the cache length.
cache_idx = tuple(
np.mod(np.asarray(cache_idx), max_sequence_length)
)
update_idx = (*prefix_idx, sequence_idx)
present_cache[cache_idx] = update[update_idx]
return (present_cache.reshape(input_shape),)
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/reference/ops/op_tensor_scatter.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
onnx/onnx:onnx/backend/test/case/node/swish.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
def swish(x: np.ndarray, alpha: float) -> np.ndarray:
    """Reference Swish activation: ``x * sigmoid(alpha * x)``."""
    sigmoid = 1 / (1 + np.exp(-alpha * x))
    return x * sigmoid
class Swish(Base):
# Generates the data files for the `test_swish` node test case.
@staticmethod
def export() -> None:
node = onnx.helper.make_node(
"Swish",
inputs=["x"],
outputs=["y"],
alpha=1.0,  # pass alpha as attribute
)
x = np.array([3, 4, 5], dtype=np.float32)
# Expected output is computed with the module-level reference `swish`.
y = swish(x, alpha=1.0)
expect(
node,
inputs=[x],
outputs=[y],
name="test_swish",
opset_imports=[onnx.helper.make_opsetid("", 24)],
)
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/backend/test/case/node/swish.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
onnx/onnx:onnx/reference/ops/op_swish.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
from onnx.reference.ops._op import OpRunUnaryNum
class Swish(OpRunUnaryNum):
# Reference Swish op: x * sigmoid(alpha * x).
def _run(self, x, alpha=None):
# The attribute value (self.alpha) is the default; an explicit
# argument overrides it.
alpha = self.alpha if alpha is None else alpha
# NOTE(review): astype() applies to the sigmoid factor only; the final
# dtype follows NumPy promotion with x — confirm this is intentional.
return (x * (1 / (1 + np.exp(-alpha * x))).astype(x.dtype),)
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/reference/ops/op_swish.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
onnx/onnx:onnx/backend/test/case/node/lpnormalization.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class LpNormalization(Base):
# Generates node test cases for LpNormalization with p in {1, 2} and
# various axes, including the default (p=2, axis=-1).
@staticmethod
def export_l2normalization_axis_0() -> None:
node = onnx.helper.make_node(
"LpNormalization", inputs=["x"], outputs=["y"], axis=0, p=2
)
x = np.array(
[[[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]], [[0.0, 5.0, 5.0], [6.0, 8.0, 0.0]]],
dtype=np.float32,
)
l2_norm_axis_0 = np.sqrt(np.sum(x**2, axis=0, keepdims=True))
# When norm is 0, output is 0 (0/0 = 0)
y = np.where(l2_norm_axis_0 == 0, 0, x / l2_norm_axis_0)
expect(node, inputs=[x], outputs=[y], name="test_l2normalization_axis_0")
@staticmethod
def export_l2normalization_axis_1() -> None:
node = onnx.helper.make_node(
"LpNormalization", inputs=["x"], outputs=["y"], axis=1, p=2
)
x = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
l2_norm_axis_1 = np.sqrt(np.sum(x**2, axis=1, keepdims=True))
y = x / l2_norm_axis_1
expect(node, inputs=[x], outputs=[y], name="test_l2normalization_axis_1")
@staticmethod
def export_l1normalization_axis_0() -> None:
node = onnx.helper.make_node(
"LpNormalization", inputs=["x"], outputs=["y"], axis=0, p=1
)
x = np.array([3.0, 4.0], dtype=np.float32)
l1_norm_axis_0 = np.sum(abs(x), axis=0, keepdims=True)
y = x / l1_norm_axis_0
expect(node, inputs=[x], outputs=[y], name="test_l1normalization_axis_0")
@staticmethod
def export_l1normalization_axis_1() -> None:
node = onnx.helper.make_node(
"LpNormalization", inputs=["x"], outputs=["y"], axis=1, p=1
)
x = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
l1_norm_axis_1 = np.sum(abs(x), axis=1, keepdims=True)
y = x / l1_norm_axis_1
expect(node, inputs=[x], outputs=[y], name="test_l1normalization_axis_1")
@staticmethod
def export_l1normalization_axis_last() -> None:
node = onnx.helper.make_node(
"LpNormalization", inputs=["x"], outputs=["y"], axis=-1, p=1
)
x = np.array(
[[[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]], [[0.0, 5.0, 5.0], [6.0, 8.0, 0.0]]],
dtype=np.float32,
)
l1_norm_axis_last = np.sum(abs(x), axis=-1, keepdims=True)
y = x / l1_norm_axis_last
expect(node, inputs=[x], outputs=[y], name="test_l1normalization_axis_last")
@staticmethod
def export_default() -> None:
# Defaults per the operator schema: p=2, axis=-1.
node = onnx.helper.make_node("LpNormalization", inputs=["x"], outputs=["y"])
x = np.array(
[[[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]], [[0.0, 5.0, 5.0], [6.0, 8.0, 0.0]]],
dtype=np.float32,
)
lp_norm_default = np.sqrt(np.sum(x**2, axis=-1, keepdims=True))
y = x / lp_norm_default
expect(node, inputs=[x], outputs=[y], name="test_lpnormalization_default")
| {
"repo_id": "onnx/onnx",
"file_path": "onnx/backend/test/case/node/lpnormalization.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/gpt-oss:tests/gpt_oss/tools/simple_browser/test_backend.py | import pytest
from typing import Generator, Any
from unittest import mock
from aiohttp import ClientSession
from gpt_oss.tools.simple_browser.backend import YouComBackend
class MockAiohttpResponse:
    """Stand-in for an aiohttp response object, usable with ``async with``."""

    def __init__(self, json: dict, status: int):
        # Payload returned verbatim by json(); status mimics the HTTP code.
        self._json = json
        self.status = status

    async def json(self):
        return self._json

    async def __aenter__(self):
        # Entering the context yields the response itself, like aiohttp does.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        pass
def mock_os_environ_get(name: str, default: Any = "test_api_key"):
    """Fake ``os.environ.get`` that only permits the You.com API key lookup."""
    # Fail fast if the backend reads an unexpected environment variable.
    allowed = ("YDC_API_KEY",)
    assert name in allowed
    return default
def test_youcom_backend():
# Constructor should store the requested source unchanged.
backend = YouComBackend(source="web")
assert backend.source == "web"
@pytest.mark.asyncio
@mock.patch("aiohttp.ClientSession.get")
async def test_youcom_backend_search(mock_session_get):
# search() should merge "web" and "news" results, in that order, into
# a numbered URL map keyed by stringified indices.
backend = YouComBackend(source="web")
api_response = {
"results": {
"web": [
{"title": "Web Result 1", "url": "https://www.example.com/web1", "snippets": "Web Result 1 snippets"},
{"title": "Web Result 2", "url": "https://www.example.com/web2", "snippets": "Web Result 2 snippets"},
],
"news": [
{"title": "News Result 1", "url": "https://www.example.com/news1", "description": "News Result 1 description"},
{"title": "News Result 2", "url": "https://www.example.com/news2", "description": "News Result 2 description"},
],
}
}
with mock.patch("os.environ.get", wraps=mock_os_environ_get):
mock_session_get.return_value = MockAiohttpResponse(api_response, 200)
async with ClientSession() as session:
result = await backend.search(query="test", topn=10, session=session)
# The result page is titled with the query itself.
assert result.title == "test"
assert result.urls == {"0": "https://www.example.com/web1", "1": "https://www.example.com/web2", "2": "https://www.example.com/news1", "3": "https://www.example.com/news2"}
@pytest.mark.asyncio
@mock.patch("aiohttp.ClientSession.post")
async def test_youcom_backend_fetch(mock_session_get):
# fetch() should convert the returned HTML to text, prefixed by the URL.
backend = YouComBackend(source="web")
api_response = [
{"title": "Fetch Result 1", "url": "https://www.example.com/fetch1", "html": "<div>Fetch Result 1 text</div>"},
]
with mock.patch("os.environ.get", wraps=mock_os_environ_get):
mock_session_get.return_value = MockAiohttpResponse(api_response, 200)
async with ClientSession() as session:
result = await backend.fetch(url="https://www.example.com/fetch1", session=session)
assert result.title == "Fetch Result 1"
assert result.text == "\nURL: https://www.example.com/fetch1\nFetch Result 1 text"
| {
"repo_id": "openai/gpt-oss",
"file_path": "tests/gpt_oss/tools/simple_browser/test_backend.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/gpt-oss:examples/gradio/gradio_chat.py | import json
import requests
import gradio as gr
DEFAULT_FUNCTION_PROPERTIES = """
{
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
}
},
"required": ["location"]
}
""".strip()
def chat_with_model(message, history, model_choice, instructions, effort, use_functions,
function_name, function_description, function_parameters,
use_browser_search, temperature, max_output_tokens, debug_mode):
"""Gradio generator handler: streams the assistant reply into `history`.

Yields (history, "") pairs as SSE deltas arrive from the local
/v1/responses endpoint; the empty string clears the input textbox.
"""
if not message.strip():
# NOTE(review): this function is a generator (it yields below), so this
# `return` value is never delivered to Gradio — an empty submission
# simply produces no update. Confirm this is the intended behavior.
return history, ""
# Append user message and empty assistant placeholder (idiomatic Gradio pattern)
history = history + [[message, ""]]
# Build messages list from history (excluding the empty assistant placeholder)
messages = []
# Convert history to messages format (excluding the last empty assistant message)
for user_msg, assistant_msg in history[:-1]:
if user_msg:
messages.append({
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": user_msg}]
})
if assistant_msg:
messages.append({
"type": "message",
"role": "assistant",
"content": [{"type": "output_text", "text": assistant_msg}]
})
# Add current user message
messages.append({
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": message}]
})
# Prepare tools
tools = []
if use_functions:
try:
tools.append({
"type": "function",
"name": function_name,
"description": function_description,
"parameters": json.loads(function_parameters),
})
except json.JSONDecodeError:
# Invalid JSON in the parameters textbox: silently skip the tool.
pass
if use_browser_search:
tools.append({"type": "browser_search"})
# Get URL based on model (matching streamlit logic)
options = ["large", "small"]
URL = ("http://localhost:8081/v1/responses" if model_choice == options[1]
else "http://localhost:8000/v1/responses")
try:
response = requests.post(
URL,
json={
"input": messages,
"stream": True,
"instructions": instructions,
"reasoning": {"effort": effort},
"metadata": {"__debug": debug_mode},
"tools": tools,
"temperature": temperature,
"max_output_tokens": max_output_tokens,
},
stream=True,
)
full_content = ""
text_delta = ""
current_output_index = 0
in_reasoning = False
# Parse the server-sent event stream line by line ("data: {json}").
for line in response.iter_lines(decode_unicode=True):
if not line or not line.startswith("data:"):
continue
data_str = line[len("data:"):].strip()
if not data_str:
continue
try:
data = json.loads(data_str)
except Exception:
# Skip malformed event payloads rather than aborting the stream.
continue
event_type = data.get("type", "")
output_index = data.get("output_index", 0)
if event_type == "response.output_item.added":
current_output_index = output_index
output_type = data.get("item", {}).get("type", "message")
text_delta = ""
if output_type == "reasoning":
if not in_reasoning:
full_content += "🤔 **Thinking...**\n"
in_reasoning = True
elif output_type == "message":
if in_reasoning:
full_content += "\n\n"
in_reasoning = False
elif event_type == "response.reasoning_text.delta":
delta = data.get("delta", "")
full_content += delta
# Update last assistant message (idiomatic Gradio pattern)
history[-1][1] = full_content
yield history, ""
elif event_type == "response.output_text.delta":
delta = data.get("delta", "")
full_content += delta
# Update last assistant message (idiomatic Gradio pattern)
history[-1][1] = full_content
yield history, ""
elif event_type == "response.output_item.done":
item = data.get("item", {})
if item.get("type") == "function_call":
function_call_text = f"\n\n🔨 Called `{item.get('name')}`\n**Arguments**\n```json\n{item.get('arguments', '')}\n```"
full_content += function_call_text
# Update last assistant message (idiomatic Gradio pattern)
history[-1][1] = full_content
yield history, ""
elif item.get("type") == "web_search_call":
web_search_text = f"\n\n🌐 **Web Search**\n```json\n{json.dumps(item.get('action', {}), indent=2)}\n```\n✅ Done"
full_content += web_search_text
# Update last assistant message (idiomatic Gradio pattern)
history[-1][1] = full_content
yield history, ""
elif event_type == "response.completed":
response_data = data.get("response", {})
if debug_mode:
debug_info = response_data.get("metadata", {}).get("__debug", "")
if debug_info:
full_content += f"\n\n**Debug**\n```\n{debug_info}\n```"
# Update last assistant message (idiomatic Gradio pattern)
history[-1][1] = full_content
yield history, ""
break
# Return final history and empty string to clear textbox
return history, ""
except Exception as e:
# Surface the error in the chat transcript instead of crashing the UI.
error_message = f"❌ Error: {str(e)}"
history[-1][1] = error_message
return history, ""
# Create the Gradio interface: chat transcript on the left, model/tool
# configuration sidebar on the right.
with gr.Blocks(title="💬 Chatbot") as demo:
gr.Markdown("# 💬 Chatbot")
with gr.Row():
with gr.Column(scale=3):
chatbot = gr.Chatbot(height=500)
with gr.Row():
msg = gr.Textbox(placeholder="Type a message...", scale=4, show_label=False)
send_btn = gr.Button("Send", scale=1)
clear_btn = gr.Button("Clear Chat")
with gr.Column(scale=1):
model_choice = gr.Radio(["large", "small"], value="small", label="Model")
instructions = gr.Textbox(
label="Instructions",
value="You are a helpful assistant that can answer questions and help with tasks.",
lines=3
)
effort = gr.Radio(["low", "medium", "high"], value="medium", label="Reasoning effort")
gr.Markdown("#### Functions")
use_functions = gr.Checkbox(label="Use functions", value=False)
# Hidden until the "Use functions" checkbox is ticked (see handler below).
with gr.Column(visible=False) as function_group:
function_name = gr.Textbox(label="Function name", value="get_weather")
function_description = gr.Textbox(
label="Function description",
value="Get the weather for a given city"
)
function_parameters = gr.Textbox(
label="Function parameters",
value=DEFAULT_FUNCTION_PROPERTIES,
lines=6
)
# Conditional browser search (matching Streamlit logic)
# In Streamlit: if "show_browser" in st.query_params:
# For Gradio, we'll always show it (simplified)
gr.Markdown("#### Built-in Tools")
use_browser_search = gr.Checkbox(label="Use browser search", value=False)
temperature = gr.Slider(0.0, 1.0, value=1.0, step=0.01, label="Temperature")
max_output_tokens = gr.Slider(1000, 20000, value=1024, step=100, label="Max output tokens")
debug_mode = gr.Checkbox(label="Debug mode", value=False)
# Event handlers
def toggle_function_group(use_funcs):
# Show/hide the function-definition inputs with the checkbox state.
return gr.update(visible=use_funcs)
use_functions.change(toggle_function_group, use_functions, function_group)
# Chat functionality
inputs = [msg, chatbot, model_choice, instructions, effort, use_functions,
function_name, function_description, function_parameters,
use_browser_search, temperature, max_output_tokens, debug_mode]
# Both Enter-in-textbox and the Send button trigger the same handler.
msg.submit(chat_with_model, inputs, [chatbot, msg])
send_btn.click(chat_with_model, inputs, [chatbot, msg])
clear_btn.click(lambda: [], outputs=chatbot)
if __name__ == "__main__":
demo.launch() | {
"repo_id": "openai/gpt-oss",
"file_path": "examples/gradio/gradio_chat.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/basic_eval.py | """
Basic eval
"""
from . import report
from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
class BasicEval(Eval):
"""Trivial smoke-test eval with a single greeting example."""
def __init__(self,):
# One hard-coded example; `answer` is only shown in the report, not graded.
self.examples = [{
"question": "hi",
"answer": "hi, how can i help?",
}]
def __call__(self, sampler: SamplerBase) -> EvalResult:
def fn(row: dict):
sampler_response = sampler([
sampler._pack_message(content=row["question"], role="user")
])
response_text = sampler_response.response_text
extracted_answer = response_text
actual_queried_prompt_messages = sampler_response.actual_queried_message_list
# Score 1.0 for any non-empty response; content is not checked.
score = 1.0 if len(extracted_answer) > 0 else 0.0
html = report.jinja_env.from_string(report.HTML_JINJA).render(
prompt_messages=actual_queried_prompt_messages,
next_message=dict(content=response_text, role="assistant"),
score=score,
correct_answer=row["answer"],
extracted_answer=extracted_answer,
)
convo = actual_queried_prompt_messages + [dict(content=response_text, role="assistant")]
return SingleEvalResult(
html=html, score=score, convo=convo, metrics={"chars": len(response_text)}
)
results = report.map_with_progress(fn, self.examples, num_threads=1)
return report.aggregate_results(results)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/basic_eval.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:tests/test_api_endpoints.py | import pytest
import json
import asyncio
from fastapi import status
from unittest.mock import patch, MagicMock, AsyncMock
class TestResponsesEndpoint:
"""Happy-path coverage for POST /v1/responses."""
def test_basic_response_creation(self, api_client, sample_request_data):
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "id" in data
assert data["object"] == "response"
assert data["model"] == sample_request_data["model"]
def test_response_with_high_reasoning(self, api_client, sample_request_data):
sample_request_data["reasoning_effort"] = "high"
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "id" in data
assert data["status"] == "completed"
def test_response_with_medium_reasoning(self, api_client, sample_request_data):
sample_request_data["reasoning_effort"] = "medium"
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "id" in data
assert data["status"] == "completed"
def test_response_with_invalid_model(self, api_client, sample_request_data):
sample_request_data["model"] = "invalid-model"
response = api_client.post("/v1/responses", json=sample_request_data)
# Should still accept but might handle differently
assert response.status_code == status.HTTP_200_OK
def test_response_with_empty_input(self, api_client, sample_request_data):
sample_request_data["input"] = ""
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
def test_response_with_tools(self, api_client, sample_request_data):
sample_request_data["tools"] = [
{
"type": "browser_search"
}
]
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
def test_response_with_custom_temperature(self, api_client, sample_request_data):
# Sweep a range of temperatures; all should be accepted.
for temp in [0.0, 0.5, 1.0, 1.5, 2.0]:
sample_request_data["temperature"] = temp
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "usage" in data
def test_streaming_response(self, api_client, sample_request_data):
sample_request_data["stream"] = True
with api_client.stream("POST", "/v1/responses", json=sample_request_data) as response:
assert response.status_code == status.HTTP_200_OK
# Verify we get SSE events
for line in response.iter_lines():
if line and line.startswith("data: "):
event_data = line[6:]  # Remove "data: " prefix
if event_data != "[DONE]":
json.loads(event_data)  # Should be valid JSON
# Only the first event is validated; that is enough for a smoke test.
break
class TestResponsesWithSession:
"""Session-scoped and continuation behavior of /v1/responses."""
def test_response_with_session_id(self, api_client, sample_request_data):
session_id = "test-session-123"
sample_request_data["session_id"] = session_id
# First request
response1 = api_client.post("/v1/responses", json=sample_request_data)
assert response1.status_code == status.HTTP_200_OK
data1 = response1.json()
# Second request with same session
sample_request_data["input"] = "Follow up question"
response2 = api_client.post("/v1/responses", json=sample_request_data)
assert response2.status_code == status.HTTP_200_OK
data2 = response2.json()
# Should have different response IDs
assert data1["id"] != data2["id"]
def test_response_continuation(self, api_client, sample_request_data):
# Create initial response
response1 = api_client.post("/v1/responses", json=sample_request_data)
assert response1.status_code == status.HTTP_200_OK
data1 = response1.json()
response_id = data1["id"]
# Continue the response
continuation_request = {
"model": sample_request_data["model"],
"response_id": response_id,
"input": "Continue the previous thought"
}
response2 = api_client.post("/v1/responses", json=continuation_request)
assert response2.status_code == status.HTTP_200_OK
class TestErrorHandling:
"""Validation and malformed-input behavior of /v1/responses."""
def test_missing_required_fields(self, api_client):
# Model field has default, so test with empty JSON
response = api_client.post("/v1/responses", json={})
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
def test_invalid_reasoning_effort(self, api_client, sample_request_data):
sample_request_data["reasoning_effort"] = "invalid"
response = api_client.post("/v1/responses", json=sample_request_data)
# May handle gracefully or return error
assert response.status_code in [status.HTTP_200_OK, status.HTTP_422_UNPROCESSABLE_ENTITY]
def test_malformed_json(self, api_client):
response = api_client.post(
"/v1/responses",
data="not json",
headers={"Content-Type": "application/json"}
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
def test_extremely_long_input(self, api_client, sample_request_data):
# Test with very long input
sample_request_data["input"] = "x" * 100000
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
class TestToolIntegration:
"""Tool declarations (browser_search, function, combinations) are accepted."""
def test_browser_search_tool(self, api_client, sample_request_data):
sample_request_data["tools"] = [
{
"type": "browser_search"
}
]
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
def test_function_tool_integration(self, api_client, sample_request_data):
sample_request_data["tools"] = [
{
"type": "function",
"name": "test_function",
"parameters": {"type": "object", "properties": {}},
"description": "Test function"
}
]
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
def test_multiple_tools(self, api_client, sample_request_data):
# Both tool kinds declared at once.
sample_request_data["tools"] = [
{
"type": "browser_search"
},
{
"type": "function",
"name": "test_function",
"parameters": {"type": "object", "properties": {}},
"description": "Test function"
}
]
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
class TestPerformance:
"""Coarse latency and repeatability checks (mock inference backend)."""
def test_response_time_under_threshold(self, api_client, sample_request_data, performance_timer):
performance_timer.start()
response = api_client.post("/v1/responses", json=sample_request_data)
elapsed = performance_timer.stop()
assert response.status_code == status.HTTP_200_OK
# Response should be reasonably fast for mock inference
assert elapsed < 5.0  # 5 seconds threshold
def test_multiple_sequential_requests(self, api_client, sample_request_data):
# Test multiple requests work correctly
for i in range(3):
data = sample_request_data.copy()
data["input"] = f"Request {i}"
response = api_client.post("/v1/responses", json=data)
assert response.status_code == status.HTTP_200_OK
class TestUsageTracking:
"""Structure and monotonicity of the token `usage` accounting object."""
def test_usage_object_structure(self, api_client, sample_request_data):
response = api_client.post("/v1/responses", json=sample_request_data)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert "usage" in data
usage = data["usage"]
assert "input_tokens" in usage
assert "output_tokens" in usage
assert "total_tokens" in usage
# reasoning_tokens may not always be present
# assert "reasoning_tokens" in usage
# Basic validation
assert usage["input_tokens"] >= 0
assert usage["output_tokens"] >= 0
# Total must be the exact sum of input and output tokens.
assert usage["total_tokens"] == usage["input_tokens"] + usage["output_tokens"]
def test_usage_increases_with_longer_input(self, api_client, sample_request_data):
# Short input
response1 = api_client.post("/v1/responses", json=sample_request_data)
usage1 = response1.json()["usage"]
# Longer input
sample_request_data["input"] = sample_request_data["input"] * 10
response2 = api_client.post("/v1/responses", json=sample_request_data)
usage2 = response2.json()["usage"]
# Longer input should use more tokens
assert usage2["input_tokens"] > usage1["input_tokens"] | {
"repo_id": "openai/gpt-oss",
"file_path": "tests/test_api_endpoints.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/gpt-oss:examples/agents-sdk-python/example.py | import asyncio
from pathlib import Path
import shutil
from openai import AsyncOpenAI
from agents import (
Agent,
ItemHelpers,
Runner,
set_default_openai_api,
set_default_openai_client,
set_tracing_disabled,
function_tool,
)
from agents.mcp import MCPServerStdio
async def prompt_user(question: str) -> str:
    """Read a line from stdin without blocking the event loop.

    ``input`` is a blocking call, so it is dispatched to the default
    thread-pool executor of the currently running loop.

    Args:
        question: Prompt string passed through to ``input``.

    Returns:
        The line entered by the user.
    """
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() is deprecated for this use since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, input, question)
async def main():
"""Run a streaming agent turn against a local OpenAI-compatible server."""
# Set up OpenAI client for local server (e.g., Ollama)
openai_client = AsyncOpenAI(
api_key="local",
base_url="http://localhost:11434/v1",
)
# Get current working directory
samples_dir = str(Path.cwd())
# Create MCP server for filesystem operations
mcp_server = MCPServerStdio(
name="Filesystem MCP Server, via npx",
params={
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
samples_dir,
],
},
)
# Connect to MCP server
await mcp_server.connect()
# Configure agents SDK
set_tracing_disabled(True)
set_default_openai_client(openai_client)
set_default_openai_api("chat_completions")
# Define weather tool
@function_tool
async def get_weather(location: str) -> str:
# Stub tool: always reports sunny weather.
return f"The weather in {location} is sunny."
# Create agent
agent = Agent(
name="My Agent",
instructions="You are a helpful assistant.",
tools=[get_weather],
model="gpt-oss:20b-test",
mcp_servers=[mcp_server],
)
# Get user input
user_input = await prompt_user("> ")
# Run agent with streaming
result = Runner.run_streamed(agent, user_input)
# Process streaming results
async for event in result.stream_events():
if event.type == "raw_response_event":
# Raw model deltas are ignored; only item-level events are printed.
continue
elif event.type == "agent_updated_stream_event":
print(f"Agent updated: {event.new_agent.name}")
elif event.type == "run_item_stream_event":
if event.item.type == "tool_call_item":
print("-- Tool was called")
elif event.item.type == "tool_call_output_item":
print(f"-- Tool output: {event.item.output}")
elif event.item.type == "message_output_item":
print(
f"-- Message output:\n {ItemHelpers.text_message_output(event.item)}"
)
else:
pass
print("=== Run complete ===")
if __name__ == "__main__":
    # The filesystem MCP server is spawned through npx, so fail fast with a
    # clear message when it is missing from PATH.
    if shutil.which("npx") is None:
        raise RuntimeError(
            "npx is not installed. Please install it with `npm install -g npx`."
        )
    asyncio.run(main())
| {
"repo_id": "openai/gpt-oss",
"file_path": "examples/agents-sdk-python/example.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt-oss-mcp-server/browser_server.py | import os
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from typing import Union, Optional
from mcp.server.fastmcp import Context, FastMCP
from gpt_oss.tools.simple_browser import SimpleBrowserTool
from gpt_oss.tools.simple_browser.backend import YouComBackend, ExaBackend
@dataclass
class AppContext:
    """Per-process registry mapping MCP session ids to browser tool instances."""

    browsers: dict[str, SimpleBrowserTool] = field(default_factory=dict)

    def create_or_get_browser(self, session_id: str) -> SimpleBrowserTool:
        """Return the browser for *session_id*, creating it on first use."""
        browser = self.browsers.get(session_id)
        if browser is None:
            # Backend is selected once per session via BROWSER_BACKEND.
            backend_name = os.getenv("BROWSER_BACKEND", "exa")
            if backend_name == "youcom":
                chosen_backend = YouComBackend(source="web")
            elif backend_name == "exa":
                chosen_backend = ExaBackend(source="web")
            else:
                raise ValueError(f"Invalid tool backend: {backend_name}")
            browser = SimpleBrowserTool(backend=chosen_backend)
            self.browsers[session_id] = browser
        return browser

    def remove_browser(self, session_id: str) -> None:
        """Forget the browser for *session_id*, if one exists."""
        self.browsers.pop(session_id, None)
@asynccontextmanager
async def app_lifespan(_server: FastMCP) -> AsyncIterator[AppContext]:
    """Provide one shared AppContext for the whole lifetime of the server."""
    context = AppContext()
    yield context
# Pass lifespan to server
mcp = FastMCP(
name="browser",
instructions=r"""
Tool for browsing.
The `cursor` appears in brackets before each browsing display: `[{cursor}]`.
Cite information from the tool using the following format:
`【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.
Do not quote more than 10 words directly from the tool output.
sources=web
""".strip(),
lifespan=app_lifespan,
port=8001,
)
@mcp.tool(
    name="search",
    title="Search for information",
    description=
    "Searches for information related to `query` and displays `topn` results.",
)
async def search(ctx: Context,
                 query: str,
                 topn: int = 10,
                 source: Optional[str] = None) -> str:
    """Run a browser search for *query* and return the rendered results."""
    # One browser instance per MCP client, keyed by client id.
    browser = ctx.request_context.lifespan_context.create_or_get_browser(
        ctx.client_id)
    texts = [
        reply.content[0].text
        async for reply in browser.search(query=query, topn=topn, source=source)
        if reply.content and hasattr(reply.content[0], 'text')
    ]
    return "\n".join(texts)
@mcp.tool(
    name="open",
    title="Open a link or page",
    description="""
Opens the link `id` from the page indicated by `cursor` starting at line number `loc`, showing `num_lines` lines.
Valid link ids are displayed with the formatting: `【{id}†.*】`.
If `cursor` is not provided, the most recent page is implied.
If `id` is a string, it is treated as a fully qualified URL associated with `source`.
If `loc` is not provided, the viewport will be positioned at the beginning of the document or centered on the most relevant passage, if available.
Use this function without `id` to scroll to a new location of an opened page.
""".strip(),
)
async def open_link(ctx: Context,
                    id: Union[int, str] = -1,
                    cursor: int = -1,
                    loc: int = -1,
                    num_lines: int = -1,
                    view_source: bool = False,
                    source: Optional[str] = None) -> str:
    """Open a link / scroll a page in this client's browser and render it."""
    browser = ctx.request_context.lifespan_context.create_or_get_browser(
        ctx.client_id)
    texts = [
        reply.content[0].text
        async for reply in browser.open(id=id,
                                        cursor=cursor,
                                        loc=loc,
                                        num_lines=num_lines,
                                        view_source=view_source,
                                        source=source)
        if reply.content and hasattr(reply.content[0], 'text')
    ]
    return "\n".join(texts)
@mcp.tool(
    name="find",
    title="Find pattern in page",
    description=
    "Finds exact matches of `pattern` in the current page, or the page given by `cursor`.",
)
async def find_pattern(ctx: Context, pattern: str, cursor: int = -1) -> str:
    """Search the current (or cursor-addressed) page for exact *pattern* matches."""
    browser = ctx.request_context.lifespan_context.create_or_get_browser(
        ctx.client_id)
    texts = [
        reply.content[0].text
        async for reply in browser.find(pattern=pattern, cursor=cursor)
        if reply.content and hasattr(reply.content[0], 'text')
    ]
    return "\n".join(texts)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt-oss-mcp-server/browser_server.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt-oss-mcp-server/build-system-prompt.py | import datetime
import asyncio
from gpt_oss.tokenizer import get_tokenizer
from openai_harmony import (
Conversation,
DeveloperContent,
HarmonyEncodingName,
Message,
ReasoningEffort,
Role,
SystemContent,
ToolNamespaceConfig,
ToolDescription,
load_harmony_encoding,
)
from mcp import ClientSession
from mcp.client.sse import sse_client
from mcp.types import ListToolsResult
async def list_server_and_tools(server_url: str):
    """Connect to an MCP server over SSE and fetch its identity and tools.

    Returns a ``(initialize_result, list_tools_result)`` pair.
    """
    async with sse_client(url=server_url) as streams:
        async with ClientSession(*streams) as session:
            init_result = await session.initialize()
            tools_result = await session.list_tools()
            return init_result, tools_result
def trim_schema(schema: dict) -> dict:
    """Rewrite an MCP-generated JSON Schema node into Harmony's variant.

    Mutates *schema* in place (and recursively its properties) and returns it:
    drops ``title``, drops explicit ``null`` defaults, and collapses ``anyOf``
    into a plain ``type`` list without ``"null"``.
    """
    schema.pop("title", None)
    # Only an explicit null default is stripped; the "" fallback ensures a
    # missing key (get() -> "") is never mistaken for None.
    if schema.get("default", "") is None:
        del schema["default"]
    variants = schema.get("anyOf")
    if variants is not None:
        # "anyOf": [{"type": "a"}, {"type": "b"}] becomes "type": ["a", "b"];
        # "null" entries are dropped since Harmony ignores nullability.
        schema["type"] = [v["type"] for v in variants if v["type"] != 'null']
        del schema["anyOf"]
    nested = schema.get("properties")
    if nested is not None:
        schema["properties"] = {
            name: trim_schema(sub) for name, sub in nested.items()
        }
    return schema
def post_process_tools_description(
        list_tools_result: ListToolsResult) -> ListToolsResult:
    """Adapt an MCP ListToolsResult in place so Harmony can render it."""
    # Normalize every tool's schema to Harmony's variant.
    for tool in list_tools_result.tools:
        tool.inputSchema = trim_schema(tool.inputSchema)
    # Tools may opt out of the prompt entirely (e.g. plain text-in/text-out
    # Python); keep only those that want to be included.
    kept = []
    for tool in list_tools_result.tools:
        if getattr(tool.annotations, "include_in_prompt", True):
            kept.append(tool)
    list_tools_result.tools = kept
    return list_tools_result
# Tokenizer used to decode the rendered Harmony tokens back into text.
tokenizer = get_tokenizer()
# MCP servers whose tool descriptions are folded into the system prompt.
tools_urls = [
    "http://localhost:8001/sse",  # browser
    "http://localhost:8000/sse",  # python
]
harmony_tool_descriptions = []
for tools_url in tools_urls:
    # Ask each server for its identity and tool schemas, then adapt the
    # schemas to Harmony's expected shape.
    initialize_response, list_tools_response = asyncio.run(
        list_server_and_tools(tools_url))
    list_tools_response = post_process_tools_description(list_tools_response)
    tool_from_mcp = ToolNamespaceConfig(
        name=initialize_response.serverInfo.name,
        description=initialize_response.instructions,
        tools=[
            ToolDescription.new(name=tool.name,
                                description=tool.description,
                                parameters=tool.inputSchema)
            for tool in list_tools_response.tools
        ])
    harmony_tool_descriptions.append(tool_from_mcp)
encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
# Base system message: low reasoning effort plus today's date.
system_message_content = (SystemContent.new().with_reasoning_effort(
    ReasoningEffort.LOW).with_conversation_start_date(
        datetime.datetime.now().strftime("%Y-%m-%d")))
# Attach every discovered tool namespace to the system message.
for tool_description in harmony_tool_descriptions:
    system_message_content = system_message_content.with_tools(
        tool_description)
system_message = Message.from_role_and_content(Role.SYSTEM,
                                               system_message_content)
# Empty developer message keeps the rendered prompt structurally complete.
developer_message_content = DeveloperContent.new().with_instructions("")
developer_message = Message.from_role_and_content(Role.DEVELOPER,
                                                  developer_message_content)
messages = [system_message, developer_message]
conversation = Conversation.from_messages(messages)
# Render to tokens, then decode so the final prompt can be inspected as text.
tokens = encoding.render_conversation(conversation)
system_message = tokenizer.decode(tokens)
print(system_message)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt-oss-mcp-server/build-system-prompt.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt-oss-mcp-server/python_server.py | from mcp.server.fastmcp import FastMCP
from gpt_oss.tools.python_docker.docker_tool import PythonTool
from openai_harmony import Message, TextContent, Author, Role
# Pass lifespan to server
# Module-level FastMCP server named "python". The instructions text is shown
# to the model and describes the stateless docker execution semantics.
mcp = FastMCP(
    name="python",
    instructions=r"""
Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
When you send a message containing python code to python, it will be executed in a stateless docker container, and the stdout of that process will be returned to you.
""".strip(),
)
@mcp.tool(
    name="python",
    title="Execute Python code",
    description="""
Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
When you send a message containing python code to python, it will be executed in a stateless docker container, and the stdout of that process will be returned to you.
""",
    annotations={
        # Harmony format don't want this schema to be part of it because it's simple text in text out
        "include_in_prompt": False,
    })
async def python(code: str) -> str:
    """Execute *code* via the docker-backed Python tool and return its output."""
    tool = PythonTool()
    request = Message(author=Author(role=Role.TOOL, name="python"),
                      content=[TextContent(text=code)])
    texts = []
    async for reply in tool.process(request):
        texts.append(reply.content[0].text)
    return "\n".join(texts)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt-oss-mcp-server/python_server.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt-oss-mcp-server/reference-system-prompt.py | import datetime
from gpt_oss.tools.simple_browser import SimpleBrowserTool
from gpt_oss.tools.simple_browser.backend import YouComBackend
from gpt_oss.tools.python_docker.docker_tool import PythonTool
from gpt_oss.tokenizer import tokenizer
from openai_harmony import (
Conversation,
DeveloperContent,
HarmonyEncodingName,
Message,
ReasoningEffort,
Role,
SystemContent,
load_harmony_encoding,
)
encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
# Base system message: low reasoning effort plus today's date.
system_message_content = (SystemContent.new().with_reasoning_effort(
    ReasoningEffort.LOW).with_conversation_start_date(
        datetime.datetime.now().strftime("%Y-%m-%d")))
# Register the browser tool (You.com web backend) in the system prompt.
backend = YouComBackend(source="web")
browser_tool = SimpleBrowserTool(backend=backend)
system_message_content = system_message_content.with_tools(
    browser_tool.tool_config)
# Register the Python (docker) tool as well.
python_tool = PythonTool()
system_message_content = system_message_content.with_tools(
    python_tool.tool_config)
system_message = Message.from_role_and_content(Role.SYSTEM,
                                               system_message_content)
# Empty developer message keeps the rendered prompt structurally complete.
developer_message_content = DeveloperContent.new().with_instructions("")
developer_message = Message.from_role_and_content(Role.DEVELOPER,
                                                  developer_message_content)
messages = [system_message, developer_message]
conversation = Conversation.from_messages(messages)
# Render to tokens, then decode so the prompt can be printed as plain text.
tokens = encoding.render_conversation(conversation)
system_message = tokenizer.decode(tokens)
print(system_message)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt-oss-mcp-server/reference-system-prompt.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/responses_api/inference/transformers.py | """
NOTE: this is not the most efficient way to use transformers. It's a simple implementation that infers
one token at a time to mimic the behavior of the Triton implementation.
"""
import os
from typing import Callable, List
# Transformers imports
from transformers import AutoModelForCausalLM, PreTrainedModel
import torch
# Temperature 0.0 selects greedy decoding in infer_next_token below.
DEFAULT_TEMPERATURE = 0.0
# NOTE(review): os.environ.get returns a *str* when TP is set but the int 2
# otherwise, and TP is not referenced elsewhere in this module — confirm
# whether this tensor-parallel setting is still needed.
TP = os.environ.get("TP", 2)
def load_model(checkpoint: str):
    """Load *checkpoint* with the transformers Auto API.

    Weights are loaded in bfloat16 and placed automatically across the
    available devices via ``device_map="auto"``.
    """
    return AutoModelForCausalLM.from_pretrained(
        checkpoint,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
def get_infer_next_token(model: PreTrainedModel):
    """Build a single-token inference callable around *model*.

    The returned callable mirrors the triton implementation's signature:
    ``infer_next_token(tokens, temperature, new_request) -> int``. Each call
    issues a one-token ``model.generate``; temperature 0 means greedy
    decoding, anything else enables sampling.
    """

    def infer_next_token(
        tokens: List[int],
        temperature: float = DEFAULT_TEMPERATURE,
        new_request: bool = False,  # interface compatibility only; unused
    ) -> int:
        batch = torch.tensor([tokens], dtype=torch.int64, device=model.device)
        generated = model.generate(
            batch,
            max_new_tokens=1,
            do_sample=temperature != 0,
            temperature=temperature,
        )
        # generate() returns prompt + new token; the last position is the
        # freshly sampled token id.
        return generated[0, -1].tolist()

    return infer_next_token
def setup_model(checkpoint: str) -> Callable[[List[int], float, bool], int]:
    """Load *checkpoint* and return its single-token inference callable."""
    return get_infer_next_token(load_model(checkpoint))
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/inference/transformers.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:examples/streamlit/streamlit_chat.py | import json
import requests
import streamlit as st
DEFAULT_FUNCTION_PROPERTIES = """
{
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
}
},
"required": ["location"]
}
""".strip()
# Session state for chat
if "messages" not in st.session_state:
    st.session_state.messages = []
st.title("💬 Chatbot")
# Model choice is persisted in the "model" query parameter across reruns.
if "model" not in st.session_state:
    if "model" in st.query_params:
        st.session_state.model = st.query_params["model"]
    else:
        st.session_state.model = "small"
options = ["large", "small"]
selection = st.sidebar.segmented_control(
    "Model", options, selection_mode="single", default=st.session_state.model
)
# st.session_state.model = selection
st.query_params.update({"model": selection})
# System instructions and reasoning effort forwarded to the Responses API.
instructions = st.sidebar.text_area(
    "Instructions",
    value="You are a helpful assistant that can answer questions and help with tasks.",
)
effort = st.sidebar.radio(
    "Reasoning effort",
    ["low", "medium", "high"],
    index=1,
)
st.sidebar.divider()
st.sidebar.subheader("Functions")
use_functions = st.sidebar.toggle("Use functions", value=False)
st.sidebar.subheader("Built-in Tools")
# Built-in Tools section
use_browser_search = st.sidebar.toggle("Use browser search", value=False)
use_code_interpreter = st.sidebar.toggle("Use code interpreter", value=False)
# Custom function definition widgets only appear when functions are enabled.
if use_functions:
    function_name = st.sidebar.text_input("Function name", value="get_weather")
    function_description = st.sidebar.text_area(
        "Function description", value="Get the weather for a given city"
    )
    function_parameters = st.sidebar.text_area(
        "Function parameters", value=DEFAULT_FUNCTION_PROPERTIES
    )
else:
    function_name = None
    function_description = None
    function_parameters = None
st.sidebar.divider()
# Sampling controls passed straight through to the server.
temperature = st.sidebar.slider(
    "Temperature", min_value=0.0, max_value=1.0, value=1.0, step=0.01
)
max_output_tokens = st.sidebar.slider(
    "Max output tokens", min_value=1, max_value=131072, value=30000, step=1000
)
st.sidebar.divider()
debug_mode = st.sidebar.toggle("Debug mode", value=False)
if debug_mode:
    st.sidebar.divider()
    st.sidebar.code(json.dumps(st.session_state.messages, indent=2), "json")
render_input = True
# Small model is served on :8081, large on :8000 (both local Responses APIs).
URL = (
    "http://localhost:8081/v1/responses"
    if selection == options[1]
    else "http://localhost:8000/v1/responses"
)
def trigger_fake_tool(container):
    """Append the user-supplied fake function output and resume the run.

    Used as the form-submit callback after the model emits a function call.
    """
    output_value = st.session_state.get("function_output", "It's sunny!")
    pending_call = st.session_state.messages[-1]
    # Only act when the conversation actually ended on a function call.
    if pending_call.get("type") != "function_call":
        return
    st.session_state.messages.append(
        {
            "type": "function_call_output",
            "call_id": pending_call.get("call_id"),
            "output": output_value,
        }
    )
    run(container)
def run(container):
    """POST the conversation to the Responses API and stream events into *container*.

    Reads the module-level sidebar settings (tools, temperature, etc.),
    issues a streaming request, and renders each SSE event as it arrives.
    """
    # Assemble the tool list from the sidebar toggles.
    tools = []
    if use_functions:
        tools.append(
            {
                "type": "function",
                "name": function_name,
                "description": function_description,
                "parameters": json.loads(function_parameters),
            }
        )
    # Add browser_search tool if checkbox is checked
    if use_browser_search:
        tools.append({"type": "browser_search"})
    if use_code_interpreter:
        tools.append({"type": "code_interpreter"})
    response = requests.post(
        URL,
        json={
            "input": st.session_state.messages,
            "stream": True,
            "instructions": instructions,
            "reasoning": {"effort": effort},
            "metadata": {"__debug": debug_mode},
            "tools": tools,
            "temperature": temperature,
            "max_output_tokens": max_output_tokens,
        },
        stream=True,
    )
    # Accumulated text for the output item currently being streamed.
    text_delta = ""
    # Per-item UI state for code-interpreter calls, keyed by item id.
    code_interpreter_sessions: dict[str, dict] = {}
    _current_output_index = 0
    # Parse server-sent events: each payload line starts with "data:".
    for line in response.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        data_str = line[len("data:") :].strip()
        if not data_str:
            continue
        try:
            data = json.loads(data_str)
        except Exception:
            # Skip malformed event payloads rather than aborting the stream.
            continue
        event_type = data.get("type", "")
        output_index = data.get("output_index", 0)
        if event_type == "response.output_item.added":
            # A new output item begins: create the chat bubble + placeholder
            # that subsequent delta events will write into.
            _current_output_index = output_index
            output_type = data.get("item", {}).get("type", "message")
            if output_type == "message":
                output = container.chat_message("assistant")
                placeholder = output.empty()
            elif output_type == "reasoning":
                output = container.chat_message("reasoning", avatar="🤔")
                placeholder = output.empty()
            elif output_type == "web_search_call":
                output = container.chat_message("web_search_call", avatar="🌐")
                output.code(
                    json.dumps(data.get("item", {}).get("action", {}), indent=4),
                    language="json",
                )
                placeholder = output.empty()
            elif output_type == "code_interpreter_call":
                item = data.get("item", {})
                item_id = item.get("id")
                message_container = container.chat_message(
                    "code_interpreter_call", avatar="🧪"
                )
                status_placeholder = message_container.empty()
                code_placeholder = message_container.empty()
                outputs_container = message_container.container()
                code_text = item.get("code") or ""
                if code_text:
                    code_placeholder.code(code_text, language="python")
                code_interpreter_sessions[item_id] = {
                    "status": status_placeholder,
                    "code": code_placeholder,
                    "outputs": outputs_container,
                    "code_text": code_text,
                    "rendered_outputs": False,
                }
                placeholder = status_placeholder
            # Reset the text accumulator for the new item.
            text_delta = ""
        elif event_type == "response.reasoning_text.delta":
            output.avatar = "🤔"
            text_delta += data.get("delta", "")
            placeholder.markdown(text_delta)
        elif event_type == "response.output_text.delta":
            text_delta += data.get("delta", "")
            placeholder.markdown(text_delta)
        elif event_type == "response.output_item.done":
            # Item finished: render final state for each item kind.
            item = data.get("item", {})
            if item.get("type") == "function_call":
                with container.chat_message("function_call", avatar="🔨"):
                    st.markdown(f"Called `{item.get('name')}`")
                    st.caption("Arguments")
                    st.code(item.get("arguments", ""), language="json")
            if item.get("type") == "web_search_call":
                placeholder.markdown("✅ Done")
            if item.get("type") == "code_interpreter_call":
                item_id = item.get("id")
                session = code_interpreter_sessions.get(item_id)
                if session:
                    session["status"].markdown("✅ Done")
                    final_code = item.get("code") or session["code_text"]
                    if final_code:
                        session["code"].code(final_code, language="python")
                        session["code_text"] = final_code
                    outputs = item.get("outputs") or []
                    if outputs and not session["rendered_outputs"]:
                        with session["outputs"]:
                            st.markdown("**Outputs**")
                            for output_item in outputs:
                                output_type = output_item.get("type")
                                if output_type == "logs":
                                    st.code(
                                        output_item.get("logs", ""),
                                        language="text",
                                    )
                                elif output_type == "image":
                                    st.image(
                                        output_item.get("url", ""),
                                        caption="Code interpreter image",
                                    )
                        session["rendered_outputs"] = True
                    elif not outputs and not session["rendered_outputs"]:
                        with session["outputs"]:
                            st.caption("(No outputs)")
                        session["rendered_outputs"] = True
                else:
                    placeholder.markdown("✅ Done")
        elif event_type == "response.code_interpreter_call.in_progress":
            item_id = data.get("item_id")
            session = code_interpreter_sessions.get(item_id)
            if session:
                session["status"].markdown("⏳ Running")
            else:
                # No session yet (event raced the item.added) — best effort.
                try:
                    placeholder.markdown("⏳ Running")
                except Exception:
                    pass
        elif event_type == "response.code_interpreter_call.interpreting":
            item_id = data.get("item_id")
            session = code_interpreter_sessions.get(item_id)
            if session:
                session["status"].markdown("🧮 Interpreting")
        elif event_type == "response.code_interpreter_call.completed":
            item_id = data.get("item_id")
            session = code_interpreter_sessions.get(item_id)
            if session:
                session["status"].markdown("✅ Done")
            else:
                try:
                    placeholder.markdown("✅ Done")
                except Exception:
                    pass
        elif event_type == "response.code_interpreter_call_code.delta":
            # Live-update the streamed code snippet.
            item_id = data.get("item_id")
            session = code_interpreter_sessions.get(item_id)
            if session:
                session["code_text"] += data.get("delta", "")
                if session["code_text"].strip():
                    session["code"].code(session["code_text"], language="python")
        elif event_type == "response.code_interpreter_call_code.done":
            item_id = data.get("item_id")
            session = code_interpreter_sessions.get(item_id)
            if session:
                final_code = data.get("code") or session["code_text"]
                session["code_text"] = final_code
                if final_code:
                    session["code"].code(final_code, language="python")
        elif event_type == "response.completed":
            # Final event: persist the produced output into session state and,
            # if the run ended on a function call, show the fake-output form.
            response = data.get("response", {})
            if debug_mode:
                container.expander("Debug", expanded=False).code(
                    response.get("metadata", {}).get("__debug", ""), language="text"
                )
            st.session_state.messages.extend(response.get("output", []))
            if st.session_state.messages[-1].get("type") == "function_call":
                with container.form("function_output_form"):
                    _function_output = st.text_input(
                        "Enter function output",
                        value=st.session_state.get("function_output", "It's sunny!"),
                        key="function_output",
                    )
                    st.form_submit_button(
                        "Submit function output",
                        on_click=trigger_fake_tool,
                        args=[container],
                    )
        # Optionally handle other event types...
# Chat display
# Re-render the full conversation from session state on every rerun; each
# message type gets its own chat-bubble style.
for msg in st.session_state.messages:
    if msg.get("type") == "message":
        with st.chat_message(msg["role"]):
            for item in msg["content"]:
                if (
                    item.get("type") == "text"
                    or item.get("type") == "output_text"
                    or item.get("type") == "input_text"
                ):
                    st.markdown(item["text"])
                    # List any URL annotations (citations) under the text.
                    if item.get("annotations"):
                        annotation_lines = "\n".join(
                            f"- {annotation.get('url')}"
                            for annotation in item["annotations"]
                            if annotation.get("url")
                        )
                        st.caption(f"**Annotations:**\n{annotation_lines}")
    elif msg.get("type") == "reasoning":
        with st.chat_message("reasoning", avatar="🤔"):
            for item in msg["content"]:
                if item.get("type") == "reasoning_text":
                    st.markdown(item["text"])
    elif msg.get("type") == "function_call":
        with st.chat_message("function_call", avatar="🔨"):
            st.markdown(f"Called `{msg.get('name')}`")
            st.caption("Arguments")
            st.code(msg.get("arguments", ""), language="json")
    elif msg.get("type") == "function_call_output":
        with st.chat_message("function_call_output", avatar="✅"):
            st.caption("Output")
            st.code(msg.get("output", ""), language="text")
    elif msg.get("type") == "web_search_call":
        with st.chat_message("web_search_call", avatar="🌐"):
            st.code(json.dumps(msg.get("action", {}), indent=4), language="json")
            st.markdown("✅ Done")
    elif msg.get("type") == "code_interpreter_call":
        with st.chat_message("code_interpreter_call", avatar="🧪"):
            st.markdown("✅ Done")
if render_input:
    # Input field
    if prompt := st.chat_input("Type a message..."):
        # Store and echo the user message, then kick off a streamed run.
        st.session_state.messages.append(
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": prompt}],
            }
        )
        with st.chat_message("user"):
            st.markdown(prompt)
        run(st.container())
| {
"repo_id": "openai/gpt-oss",
"file_path": "examples/streamlit/streamlit_chat.py",
"license": "Apache License 2.0",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/chat.py | """
Harmony chat with tools
"""
import atexit
import argparse
import asyncio
import datetime
import os
from pathlib import Path
try:
import gnureadline as readline
except ImportError:
import readline
import torch
import termcolor
from gpt_oss.tools import apply_patch
from gpt_oss.tools.simple_browser import SimpleBrowserTool
from gpt_oss.tools.simple_browser.backend import YouComBackend
from gpt_oss.tools.python_docker.docker_tool import PythonTool
from openai_harmony import (
Author,
Conversation,
DeveloperContent,
HarmonyEncodingName,
Message,
ReasoningEffort,
Role,
StreamableParser,
StreamState,
SystemContent,
TextContent,
ToolDescription,
load_harmony_encoding,
)
# Map the CLI --reasoning-effort choices onto Harmony's ReasoningEffort enum.
REASONING_EFFORT = {
    "high": ReasoningEffort.HIGH,
    "medium": ReasoningEffort.MEDIUM,
    "low": ReasoningEffort.LOW,
}
def get_user_input():
    """Read a line of input on rank 0 and broadcast it to all ranks.

    In a torch.distributed run only rank 0 talks to stdin; every other rank
    receives the same string via broadcast_object_list. In a single-process
    run this degenerates to a plain input() call.
    """
    is_distributed = torch.distributed.is_initialized()
    rank = torch.distributed.get_rank() if is_distributed else 0
    # A one-element list is the container broadcast_object_list operates on.
    holder = [input() if rank == 0 else ""]
    if is_distributed:
        torch.distributed.broadcast_object_list(holder, 0)
    return holder[0]
def main(args):
match args.backend:
case "triton":
from gpt_oss.triton.model import TokenGenerator as TritonGenerator
from gpt_oss.torch.utils import init_distributed
device = init_distributed()
generator = TritonGenerator(args.checkpoint, args.context, device)
case "torch":
from gpt_oss.torch.model import TokenGenerator as TorchGenerator
from gpt_oss.torch.utils import init_distributed
device = init_distributed()
generator = TorchGenerator(args.checkpoint, device)
case "vllm":
from gpt_oss.vllm.token_generator import TokenGenerator as VLLMGenerator
generator = VLLMGenerator(args.checkpoint, tensor_parallel_size=2)
case _:
raise ValueError(f"Invalid backend: {args.backend}")
encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
system_message_content = (
SystemContent.new()
.with_reasoning_effort(REASONING_EFFORT[args.reasoning_effort])
.with_conversation_start_date(datetime.datetime.now().strftime("%Y-%m-%d"))
)
if args.browser:
backend = YouComBackend(
source="web",
)
browser_tool = SimpleBrowserTool(backend=backend)
system_message_content = system_message_content.with_tools(browser_tool.tool_config)
if args.python:
python_tool = PythonTool()
system_message_content = system_message_content.with_tools(python_tool.tool_config)
system_message = Message.from_role_and_content(Role.SYSTEM, system_message_content)
messages = [system_message]
if args.apply_patch:
apply_patch_instructions = Path(apply_patch.__file__).parent / "apply_patch.md"
developer_message = ""
if args.developer_message:
developer_message = args.developer_message + "\n"
developer_message += apply_patch_instructions.read_text()
developer_message_content = (
DeveloperContent.new()
.with_instructions(developer_message)
.with_function_tools([
ToolDescription.new(
"apply_patch",
"Patch a file",
parameters={
"type": "string",
"description": "Formatted patch code",
"default": "*** Begin Patch\n*** End Patch\n",
}
),
])
)
messages.append(Message.from_role_and_content(Role.DEVELOPER, developer_message_content))
elif args.developer_message:
developer_message_content = DeveloperContent.new().with_instructions(args.developer_message)
messages.append(Message.from_role_and_content(Role.DEVELOPER, developer_message_content))
else:
developer_message_content = None
if args.raw:
conversation = Conversation.from_messages(messages)
tokens = encoding.render_conversation(conversation)
system_message = encoding.decode(tokens)
print(system_message, flush=True, end="")
empty_user_message_tokens = encoding.render(Message.from_role_and_content(Role.USER, ""))
user_message_start = encoding.decode(empty_user_message_tokens[:-1])
user_message_end = encoding.decode(empty_user_message_tokens[-1:])
else:
# System message
print(termcolor.colored("System Message:", "cyan"), flush=True)
print(termcolor.colored("Model Identity:", "cyan"), system_message_content.model_identity, flush=True)
print(termcolor.colored("Reasoning Effort:", "cyan"), system_message_content.reasoning_effort, flush=True)
print(termcolor.colored("Conversation Start Date:", "cyan"), system_message_content.conversation_start_date, flush=True)
print(termcolor.colored("Knowledge Cutoff:", "cyan"), system_message_content.knowledge_cutoff, flush=True)
print(termcolor.colored("Browser Tool:", "cyan"), "Enabled" if args.browser else "Disabled", flush=True)
print(termcolor.colored("Python Tool:", "cyan"), "Enabled" if args.python else "Disabled", flush=True)
print(termcolor.colored("Apply Patch Function:", "cyan"), "Enabled" if args.apply_patch else "Disabled", flush=True)
if developer_message_content:
print(termcolor.colored("Developer Message:", "yellow"), flush=True)
print(developer_message_content.instructions, flush=True)
# Print the system message and the user message start
MESSAGE_PADDING = 12
while True:
last_message = messages[-1]
if last_message.recipient is None:
if args.raw:
print(user_message_start, end="", flush=True)
user_message = get_user_input()
print(user_message_end, flush=True, end="")
else:
print(termcolor.colored("User:".ljust(MESSAGE_PADDING), "red"), flush=True)
user_message = get_user_input()
user_message = Message.from_role_and_content(Role.USER, user_message)
messages.append(user_message)
else:
# Tool or function call
if last_message.recipient.startswith("browser."):
assert args.browser, "Browser tool is not enabled"
tool_name = "Search"
async def run_tool():
results = []
async for msg in browser_tool.process(last_message):
results.append(msg)
return results
result = asyncio.run(run_tool())
messages += result
elif last_message.recipient.startswith("python"):
assert args.python, "Python tool is not enabled"
tool_name = "Python"
async def run_tool():
results = []
async for msg in python_tool.process(last_message):
results.append(msg)
return results
result = asyncio.run(run_tool())
messages += result
elif last_message.recipient == "functions.apply_patch":
assert args.apply_patch, "Apply patch tool is not enabled"
tool_name = "Apply Patch"
text = last_message.content[0].text
tool_output = None
if text.startswith("{"):
# this is json, try to extract the patch from it
import json
try:
some_dict = json.loads(text)
_, text = some_dict.popitem()
except Exception as e:
tool_output = f"Error parsing JSON: {e}"
if tool_output is None:
try:
tool_output = apply_patch.apply_patch(text)
except Exception as e:
tool_output = f"Error applying patch: {e}"
message = (
Message(
author=Author.new(Role.TOOL, last_message.recipient),
content=[TextContent(text=tool_output)]
)
.with_recipient("assistant")
)
if last_message.channel:
message = message.with_channel(last_message.channel)
result = [message]
messages += result
else:
raise ValueError(f"Unknown tool or function call: {last_message.recipient}")
# Print the tool or function call result
if args.raw:
rendered_result = encoding.render_conversation(Conversation.from_messages(result))
print(encoding.decode(rendered_result), flush=True, end="")
else:
print(termcolor.colored(f"{tool_name} output:".ljust(MESSAGE_PADDING), "magenta"), flush=True)
if tool_name == "Search" and not args.show_browser_results:
print("[Search results fed to the model]")
else:
print(result[0].content[0].text)
conversation = Conversation.from_messages(messages)
tokens = encoding.render_conversation_for_completion(
conversation, Role.ASSISTANT
)
if args.raw:
# Print the last two tokens, which are the start of the assistant message
print(encoding.decode(tokens[-2:]), flush=True, end="")
parser = StreamableParser(encoding, role=Role.ASSISTANT)
field_created = False
current_output_text = ""
output_text_delta_buffer = ""
for predicted_token in generator.generate(tokens, encoding.stop_tokens_for_assistant_actions()):
parser.process(predicted_token)
if args.raw:
print(encoding.decode([predicted_token]), end="", flush=True)
continue
if parser.state == StreamState.EXPECT_START:
print("") # new line
field_created = False
if not parser.last_content_delta:
continue
if not field_created:
field_created = True
if parser.current_channel == "final":
print(termcolor.colored("Assistant:", "green"), flush=True)
elif parser.current_recipient is not None:
print(termcolor.colored(f"Tool call to {parser.current_recipient}:", "cyan"), flush=True)
else:
print(termcolor.colored("CoT:", "yellow"), flush=True)
should_send_output_text_delta = True
output_text_delta_buffer += parser.last_content_delta
if args.browser:
updated_output_text, _annotations, has_partial_citations = browser_tool.normalize_citations(current_output_text + output_text_delta_buffer)
output_text_delta_buffer = updated_output_text[len(current_output_text):]
if has_partial_citations:
should_send_output_text_delta = False
if should_send_output_text_delta:
print(output_text_delta_buffer, end="", flush=True)
current_output_text += output_text_delta_buffer
output_text_delta_buffer = ""
messages += parser.messages
if __name__ == "__main__":
    # CLI entry point for the interactive chat demo. argparse/os/readline/
    # atexit and main() come from earlier in this module.
    parser = argparse.ArgumentParser(
        description="Chat example",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "checkpoint",
        metavar="FILE",
        type=str,
        help="Path to the SafeTensors checkpoint",
    )
    parser.add_argument(
        "-r",
        "--reasoning-effort",
        metavar="REASONING_EFFORT",
        type=str,
        default="low",
        choices=["high", "medium", "low"],
        help="Reasoning effort",
    )
    parser.add_argument(
        "-a",
        "--apply-patch",
        action="store_true",
        help="Make apply_patch function available to the model",
    )
    parser.add_argument(
        "-b",
        "--browser",
        default=False,
        action="store_true",
        help="Use browser tool",
    )
    parser.add_argument(
        "--show-browser-results",
        default=False,
        action="store_true",
        help="Show browser results",
    )
    parser.add_argument(
        "-p",
        "--python",
        default=False,
        action="store_true",
        help="Use python tool",
    )
    parser.add_argument(
        "--developer-message",
        default="",
        help="Developer message",
    )
    parser.add_argument(
        "-c",
        "--context",
        metavar="CONTEXT",
        type=int,
        default=8192,
        help="Max context length",
    )
    parser.add_argument(
        "--raw",
        default=False,
        action="store_true",
        help="Raw mode (does not render Harmony encoding)",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="triton",
        choices=["triton", "torch", "vllm"],
        help="Inference backend",
    )
    args = parser.parse_args()
    # Restore/persist readline history only in single-process runs —
    # presumably to keep multiple ranks from racing on the same history
    # file under distributed launches (WORLD_SIZE > 1) — confirm.
    if int(os.environ.get("WORLD_SIZE", 1)) == 1:
        histfile = os.path.join(os.path.expanduser("~"), ".chat")
        try:
            readline.read_history_file(histfile)
            readline.set_history_length(10000)
        except FileNotFoundError:
            # First run: no history file yet; start with empty history.
            pass
        atexit.register(readline.write_history_file, histfile)
    main(args)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/chat.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/__main__.py | import argparse
import json
from datetime import datetime
from . import report
from .basic_eval import BasicEval
from .gpqa_eval import GPQAEval
from .aime_eval import AIME25Eval
from .healthbench_eval import HealthBenchEval
from .chat_completions_sampler import (
OPENAI_SYSTEM_MESSAGE_API,
ChatCompletionsSampler,
)
from .responses_sampler import ResponsesSampler
def main():
parser = argparse.ArgumentParser(
description="Evaluate the models.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--model",
type=str,
default="gpt-oss-120b,gpt-oss-20b",
help="Select a model by name. Accepts a comma-separated list.",
)
parser.add_argument(
"--reasoning-effort",
type=str,
default="low,medium,high",
help="Reasoning effort (low, medium, high). Accepts a comma-separated list.",
)
parser.add_argument(
"--sampler",
type=str,
choices=["responses", "chat_completions"],
default="responses",
help="Sampler backend to use for models.",
)
parser.add_argument(
"--base-url",
type=str,
default="http://localhost:8000/v1",
help="Base URL for the API.",
)
parser.add_argument(
"--eval",
type=str,
default="gpqa,healthbench,healthbench_hard,healthbench_consensus,aime25",
help="Select an eval by name. Accepts a comma-separated list.",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="Sampling temperature",
)
parser.add_argument(
"--n-threads",
type=int,
default=1584,
help="Number of threads to run.",
)
parser.add_argument(
"--debug", action="store_true", help="Run in debug mode"
)
parser.add_argument(
"--examples", type=int, help="Number of examples to use (overrides default)"
)
args = parser.parse_args()
sampler_cls = ResponsesSampler if args.sampler == "responses" else ChatCompletionsSampler
models = {}
for model_name in args.model.split(","):
for reasoning_effort in args.reasoning_effort.split(","):
models[f"{model_name}-{reasoning_effort}"] = sampler_cls(
model=model_name,
reasoning_model=True,
reasoning_effort=reasoning_effort,
temperature=args.temperature,
base_url=args.base_url,
max_tokens=131_072,
)
print(f"Running with args {args}")
grading_sampler = ChatCompletionsSampler(
model="gpt-4.1-2025-04-14",
system_message=OPENAI_SYSTEM_MESSAGE_API,
max_tokens=2048,
base_url="https://api.openai.com/v1",
)
def get_evals(eval_name, debug_mode):
num_examples = (
args.examples if args.examples is not None else (5 if debug_mode else None)
)
# Set num_examples = None to reproduce full evals
match eval_name:
case "basic":
return BasicEval()
case "gpqa":
return GPQAEval(
n_repeats=1 if args.debug else 8,
num_examples=num_examples,
debug=debug_mode,
n_threads=args.n_threads or 1,
)
case "healthbench":
return HealthBenchEval(
grader_model=grading_sampler,
num_examples=10 if debug_mode else num_examples,
n_repeats=1,
n_threads=args.n_threads or 1,
subset_name=None,
)
case "healthbench_hard":
return HealthBenchEval(
grader_model=grading_sampler,
num_examples=10 if debug_mode else num_examples,
n_repeats=1,
n_threads=args.n_threads or 1,
subset_name="hard",
)
case "healthbench_consensus":
return HealthBenchEval(
grader_model=grading_sampler,
num_examples=10 if debug_mode else num_examples,
n_repeats=1,
n_threads=args.n_threads or 1,
subset_name="consensus",
)
case "aime25":
return AIME25Eval(
n_repeats=1 if args.debug else 8,
num_examples=num_examples,
n_threads=args.n_threads or 1,
)
case _:
raise Exception(f"Unrecognized eval type: {eval_name}")
evals = {}
for eval_name in args.eval.split(","):
evals[eval_name] = get_evals(eval_name, args.debug)
debug_suffix = "_DEBUG" if args.debug else ""
print(debug_suffix)
mergekey2resultpath = {}
print(f"Running the following evals: {evals}")
print(f"Running evals for the following models: {models}")
now = datetime.now()
date_str = now.strftime("%Y%m%d_%H%M%S")
for model_name, sampler in models.items():
model_name = model_name.replace("/", "__")
for eval_name, eval_obj in evals.items():
result = eval_obj(sampler)
# ^^^ how to use a sampler
file_stem = f"{eval_name}_{model_name}_temp{args.temperature}"
# file stem should also include the year, month, day, and time in hours and minutes
file_stem += f"_{date_str}"
report_filename = f"/tmp/{file_stem}{debug_suffix}.html"
print(f"Writing report to {report_filename}")
with open(report_filename, "w") as fh:
fh.write(report.make_report(result))
assert result.metrics is not None
metrics = result.metrics | {"score": result.score}
# Sort metrics by key
metrics = dict(sorted(metrics.items()))
print(metrics)
result_filename = f"/tmp/{file_stem}{debug_suffix}.json"
with open(result_filename, "w") as f:
f.write(json.dumps(metrics, indent=2))
print(f"Writing results to {result_filename}")
full_result_filename = f"/tmp/{file_stem}{debug_suffix}_allresults.json"
with open(full_result_filename, "w") as f:
result_dict = {
"score": result.score,
"metrics": result.metrics,
"htmls": result.htmls,
"convos": result.convos,
"metadata": result.metadata,
}
f.write(json.dumps(result_dict, indent=2))
print(f"Writing all results to {full_result_filename}")
mergekey2resultpath[f"{file_stem}"] = result_filename
merge_metrics = []
for eval_model_name, result_filename in mergekey2resultpath.items():
try:
result = json.load(open(result_filename, "r+"))
except Exception as e:
print(e, result_filename)
continue
result = result.get("f1_score", result.get("score", None))
eval_name = eval_model_name[: eval_model_name.find("_")]
model_name = eval_model_name[eval_model_name.find("_") + 1 :]
merge_metrics.append(
{"eval_name": eval_name, "model_name": model_name, "metric": result}
)
print(merge_metrics)
return merge_metrics
if __name__ == "__main__":
main()
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/__main__.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/abcd_grader.py | import re
import sys
_PATTERNS = [
# 0)"**Answer:** A" or "*Answers* – B", i.e. markdown‐wrapped "Answer(s)" with an unwrapped letter.
re.compile(
r'''(?ix) # case‐insensitive, ignore‐space
(?:\*{1,2}|_{1,2}) # leading *…* or _…_
Answer[s]? # Answer or Answers
\s*[:\-–]? # optional separator
(?:\*{1,2}|_{1,2}) # closing wrapper
\s* # optional space
([ABCD])\b # the actual letter
''',
re.X
),
# 0.1)
re.compile(r'''(?ix) # ignore case, allow verbose mode
^\s* # optional leading whitespace
(?:\*{1,2}|_{1,2})? # optional markdown wrapper
Answer:? # the word 'answer' with an optional colon
(?:\*{1,2}|_{1,2})? # optional markdown wrapper again
\s*:?\s* # optional colon with optional spaces
(?:\*{1,2}|_{1,2})? # optional markdown wrapper before letter
([ABCD]) # capture the letter
(?:\*{1,2}|_{1,2})? # optional markdown wrapper after letter
\s* # optional trailing whitespace, end of line
''', re.MULTILINE),
# 1) Answer: (C) or Answers: (B)
re.compile(r'(?ix)\bAnswer[s]?\b\s*[:\-–]?\s*\(\s*([ABCD])\s*\)'),
# 2) Answer: C or Answers – D
re.compile(r'(?ix)\bAnswer[s]?\b\s*[:\-–]?\s*([ABCD])\b'),
# 3) Option B or Choice: C
re.compile(r'(?ix)\b(?:Option|Choice)\b\s*[:\-–]?\s*([ABCD])\b'),
# 7) LaTeX \boxed{...A...}, catches both \boxed{A} and
# \boxed{\text{A } 2.08\times10^{-6}\,\mathrm{m}} etc.
re.compile(r'(?x)\\boxed\{[^}]*?([ABCD])[^}]*\}', re.MULTILINE),
# 7.5) LaTeX \boxed{\textbf{...C...}}
re.compile(r'(?x)\\boxed\{[^}]*?\\textbf\{[^}]*?([ABCD])[^}]*\}[^}]*\}', re.MULTILINE),
# 7.51) LaTeX \boxed{\text{...C...}}
re.compile(r'(?x)\\boxed\{[^}]*?\\text\{[^}]*?([ABCD])[^}]*\}[^}]*\}', re.MULTILINE),
# 4) bare singletons: (A) [B]
re.compile(r'(?x)(?<![A-Za-z0-9])[\(\[]\s*([ABCD])\s*[\)\]](?![A-Za-z0-9])'),
# 5) Markdown‐wrapped: *A* **B** _C_ __D__
re.compile(r'(?x)(?<![A-Za-z0-9])(?:\*{1,2}|_{1,2})([ABCD])(?:\*{1,2}|_{1,2})(?![A-Za-z0-9])'),
# 6) LaTeX \textbf{...C...}
re.compile(r'(?x)\\textbf\{[^}]*?([ABCD])[^}]*\}'),
# 8) markdown‐wrapped answer plus “)” plus description, e.g. **D) …**
re.compile(r'''(?x) # ignore whitespace in pattern
(?<![A-Za-z0-9]) # not preceded by word‐char
(?:\*{1,2}|_{1,2}) # opening ** or __ or * or _
\s*([ABCD])\) # capture letter plus “)”
[^*_\n]+? # some text inside wrapper
(?:\*{1,2}|_{1,2}) # closing wrapper
(?![A-Za-z0-9]) # not followed by word‐char
'''),
# 9) final fallback: a line that's exactly "A", "B.", "C)", "**D**", etc.
re.compile(r'''(?x)^\s*
(?:\*{1,2}|_{1,2})? # optional markdown wrapper
([ABCD]) # capture group for letter
(?:\*{1,2}|_{1,2})? # optional closing markdown
\s*[\.\)\-–:]? # optional separator after the letter
\s*.*$ # allow any following text
''', re.MULTILINE),
]
def extract_abcd(text: str) -> str | None:
    """Find the declared multiple-choice answer letter in *text*.

    Tries every pattern in _PATTERNS, collects all hits, and prefers the
    hit from the highest-priority (lowest-index) pattern, breaking ties by
    shortest overall match. If nothing matches, falls back to the first
    character of the text (after stripping a leading '**').
    """
    candidates = []
    for priority, pattern in enumerate(_PATTERNS):
        found = pattern.search(text)
        if found:
            letter = found.group(1).upper()
            if letter in 'ABCD':
                candidates.append((priority, found, letter))
    # Lowest pattern index first; among equals, the shortest match wins.
    candidates.sort(key=lambda item: (item[0], len(item[1].group(0))))
    if candidates:
        return candidates[0][2]
    return text.removeprefix('**')[:1]
def main():
    """Print the extracted answer letter for each file argument, or for
    each line of stdin when no files are given."""
    paths = sys.argv[1:]
    if paths:
        for path in paths:
            with open(path, encoding='utf8') as handle:
                content = handle.read()
            extracted = extract_abcd(content)
            print(f"{path} ➜ {extracted!r}")
    else:
        # No file arguments: treat each stdin line as one document.
        for line in sys.stdin:
            extracted = extract_abcd(line)
            print(f"{line} ➜ {extracted!r}")
if __name__ == "__main__":
    main()
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/abcd_grader.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/aime_eval.py | """
AIME 2025: https://huggingface.co/datasets/opencompass/AIME2025
"""
import random
import re
import pandas
from . import report
from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
AIME_TEMPLATE = """
{question}
Please reason step by step, and put your final answer within \\boxed{{}}.
"""
def format_aime_question(row):
    """Render one AIME dataset row into the boxed-answer prompt."""
    question_text = row["question"]
    return AIME_TEMPLATE.format(question=question_text)
def extract_boxed_text(text):
    """Pull the final answer out of the last \\boxed{...}/\\framebox{...} in
    *text*; if a boxed value holds a comma list, keep the last element.
    Falls back to the last bare integer in the text, else ''."""
    boxed_hits = re.findall(r'boxed{(.*?)}|framebox{(.*?)}', text, re.DOTALL)
    # Walk backwards so the final boxed expression wins.
    for groups in reversed(boxed_hits):
        for captured in groups:
            if captured != "":
                return captured.split(',')[-1].strip()
    digit_runs = re.findall(r'\d+', text, re.DOTALL)
    return digit_runs[-1] if digit_runs else ""
def normalize_number(s):
    """Return the run of digits at the start of *s*, or None if *s* does
    not begin with a digit."""
    leading_digits = re.match(r"\d+", s)
    return leading_digits.group(0) if leading_digits else None
class AIME25Eval(Eval):
    """AIME 2025 eval: loads both contest halves (I and II) from the
    opencompass/AIME2025 dataset and scores exact-match integer answers."""
    def __init__(
        self,
        n_repeats: int = 4,
        num_examples: int | None = None, # restrict to a subset of the data for debugging
        n_threads: int = 1,
    ):
        # Both halves of the contest are fetched straight from HuggingFace.
        path1 = f"https://huggingface.co/datasets/opencompass/AIME2025/raw/main/aime2025-I.jsonl"
        df1 = pandas.read_json(path1, lines=True)
        path2 = f"https://huggingface.co/datasets/opencompass/AIME2025/raw/main/aime2025-II.jsonl"
        df2 = pandas.read_json(path2, lines=True)
        examples = [row.to_dict() for _, row in df1.iterrows()] + [row.to_dict() for _, row in df2.iterrows()]
        # Keep only the fields we need; string answers are normalized to
        # their leading digit run.
        examples = [{
            "question": row["question"],
            "answer": normalize_number(row["answer"]) if isinstance(row["answer"], str) else row["answer"],
        } for row in examples]
        # Fixed seed so any subsampling is reproducible across runs.
        rng = random.Random(0)
        if num_examples:
            assert n_repeats == 1, "n_repeats only supported for num_examples = None"
            examples = rng.sample(examples, num_examples)
        examples = examples * n_repeats
        # NOTE(review): "permutation" is never read by __call__ for AIME —
        # presumably carried over from the multiple-choice evals; confirm
        # before removing (it also advances the shared RNG stream).
        examples = [example | {"permutation": rng.sample(range(4), 4)} for example in examples]
        self.examples = examples
        self.n_repeats = n_repeats
        self.n_threads = n_threads
    def __call__(self, sampler: SamplerBase) -> EvalResult:
        """Sample a completion for every example and aggregate graded results."""
        def fn(row: dict):
            # Grade one example: build prompt, sample, extract, exact-match.
            prompt_messages = [
                sampler._pack_message(
                    content=format_aime_question(row), role="user"
                )
            ]
            sampler_response = sampler(prompt_messages)
            response_text = sampler_response.response_text
            actual_queried_prompt_messages = sampler_response.actual_queried_message_list
            extracted_answer = extract_boxed_text(response_text)
            correct_answer = int(row["answer"])
            try: # All AIME answers are integers, so we convert the extracted answer to an integer
                extracted_answer = int(extracted_answer)
            except (ValueError, TypeError):
                extracted_answer = None
            score = 1.0 if extracted_answer == correct_answer else 0.0
            html = report.jinja_env.from_string(report.HTML_JINJA).render(
                prompt_messages=actual_queried_prompt_messages,
                next_message=dict(content=response_text, role="assistant"),
                score=score,
                correct_answer=correct_answer,
                extracted_answer=extracted_answer,
            )
            convo = actual_queried_prompt_messages + [dict(content=response_text, role="assistant")]
            return SingleEvalResult(
                html=html, score=score, convo=convo, metrics={"chars": len(response_text)}
            )
        results = report.map_with_progress(fn, self.examples, num_threads=self.n_threads)
        return report.aggregate_results(results)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/aime_eval.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/gpqa_eval.py | """
GPQA: A Graduate-Level Google-Proof Q&A Benchmark
David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, Samuel R. Bowman
https://arxiv.org/abs/2311.12022
"""
import random
import pandas
from . import report
from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
from .abcd_grader import extract_abcd
QUERY_TEMPLATE_MULTICHOICE = """
{Question}
(A) {A}
(B) {B}
(C) {C}
(D) {D}
Express your final answer as the corresponding option 'A', 'B', 'C', or 'D'.
""".strip()
def format_multichoice_question(row):
    """Fill the four-option multiple-choice prompt from a row mapping with
    keys Question, A, B, C, D."""
    fields = dict(row)
    return QUERY_TEMPLATE_MULTICHOICE.format(**fields)
class GPQAEval(Eval):
    """GPQA multiple-choice eval (default: diamond variant). Each example's
    answer options are shuffled with a per-example permutation."""
    def __init__(
        self,
        n_repeats: int = 8,
        variant: str = "diamond",
        num_examples: int | None = None,  # restrict to a subset of the data for debugging
        debug: bool = False,
        n_threads: int = 1,
    ):
        df = pandas.read_csv(
            f"https://openaipublic.blob.core.windows.net/simple-evals/gpqa_{variant}.csv"
        )
        # Fixed seed so subsampling and option permutations are reproducible.
        rng = random.Random(0)
        if debug:
            # Debug mode pins a single known question for quick iteration.
            examples = [row.to_dict() for _, row in df.iterrows() if "ESPRESSO spectrograph, please" in row["Question"]]
        else:
            examples = [row.to_dict() for _, row in df.iterrows()]
            if num_examples:
                assert n_repeats == 1, "n_repeats only supported for num_examples = None"
                examples = rng.sample(examples, num_examples)
        examples = examples * n_repeats
        # Each copy gets its own shuffle of the four answer options.
        examples = [example | {"permutation": rng.sample(range(4), 4)} for example in examples]
        self.examples = examples
        self.n_repeats = n_repeats
        self.n_threads = n_threads
    def __call__(self, sampler: SamplerBase) -> EvalResult:
        """Sample a completion for every example and aggregate graded results."""
        def fn(row: dict):
            # Apply the per-example permutation, then locate the correct letter.
            choices = [
                row["Correct Answer"],
                row["Incorrect Answer 1"],
                row["Incorrect Answer 2"],
                row["Incorrect Answer 3"],
            ]
            choices = [choices[i] for i in row["permutation"]]
            correct_index = choices.index(row["Correct Answer"])
            correct_answer = "ABCD"[correct_index]
            choices_dict = dict(
                A=choices[0], B=choices[1], C=choices[2], D=choices[3], Question=row["Question"]
            )
            prompt_messages = [
                sampler._pack_message(
                    content=format_multichoice_question(choices_dict), role="user"
                )
            ]
            sampler_response = sampler(prompt_messages)
            response_text = sampler_response.response_text
            actual_queried_prompt_messages = sampler_response.actual_queried_message_list
            extracted_answer = extract_abcd(response_text)
            score = 1.0 if extracted_answer == correct_answer else 0.0
            html = report.jinja_env.from_string(report.HTML_JINJA).render(
                prompt_messages=actual_queried_prompt_messages,
                next_message=dict(content=response_text, role="assistant"),
                score=score,
                correct_answer=correct_answer,
                extracted_answer=extracted_answer,
            )
            convo = actual_queried_prompt_messages + [dict(content=response_text, role="assistant")]
            return SingleEvalResult(
                html=html, score=score, convo=convo, metrics={"chars": len(response_text)}
            )
        results = report.map_with_progress(fn, self.examples, num_threads=self.n_threads)
        return report.aggregate_results(results)
if __name__ == "__main__":
    # Offline re-grading utility: re-run extract_abcd over a saved
    # all-results JSON and report pass@1 against the ground truth
    # embedded in each example's HTML report.
    # Fix: imports hoisted out of the loop body (import re was previously
    # re-executed on every iteration).
    import json
    import re
    import sys

    with open(sys.argv[1], "r") as f:
        results = json.load(f)
    passes = 0
    for convo, html in zip(results["convos"], results["htmls"]):
        message = convo[-1]["content"]
        # the ground truth is in <p>Correct Answer: A</p> in the html
        ground_truth = re.search(r"<p>Correct Answer: (A|B|C|D)</p>", html)
        ground_truth = ground_truth.group(1)
        extracted_answer = extract_abcd(message)
        if extracted_answer == ground_truth:
            passes += 1
        elif len(message) > 15:
            print("no match:", message)
            print("ground truth:", ground_truth)
            print("extracted answer:", extracted_answer)
            print("--------------------------------")
    pass_rate = passes / len(results["convos"])
    print(f"pass@1: {pass_rate}")
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/gpqa_eval.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/healthbench_eval.py | """
This script evaluates the performance of a model on the HealthBench dataset.
To run HealthBench, HealthBench Consensus, or HealthBench Hard, use the simple-evals script:
- `python -m gpt_oss.evals --eval=healthbench --model=gpt-oss-120b`
- `python -m gpt_oss.evals --eval=healthbench_consensus --model=gpt-oss-120b`
- `python -m gpt_oss.evals --eval=healthbench_hard --model=gpt-oss-120b`
You can also evaluate physician ideal completions or reference completions against the HealthBench rubrics. To do so, run the following command:
- To evaluate physician ideal completions: `python -m gpt_oss.evals.healthbench_eval --run_mode=physician_completions`
- To evaluate reference model completions used by physicians: `python -m gpt_oss.evals.healthbench_eval --run_mode=physician_completion_references`
"""
import argparse
import copy
import hashlib
import json
import random
import re
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from typing import Literal
import blobfile as bf
import numpy as np
from . import report
from .chat_completions_sampler import (
OPENAI_SYSTEM_MESSAGE_API,
ChatCompletionsSampler,
)
from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult
# HealthBench dataset snapshots (JSONL: one conversation + rubric per line).
INPUT_PATH = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl"
INPUT_PATH_HARD = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/hard_2025-05-08-21-00-10.jsonl"
INPUT_PATH_CONSENSUS = "https://openaipublic.blob.core.windows.net/simple-evals/healthbench/consensus_2025-05-09-20-00-46.jsonl"
# Prompt sent to the grader model for each (conversation, rubric item) pair.
# <<conversation>> and <<rubric_item>> look like literal placeholders filled
# in by the grading code (not str.format fields) — confirm against the
# grading call site.
GRADER_TEMPLATE = """
Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
# Conversation
<<conversation>>
# Rubric item
<<rubric_item>>
# Instructions
Return a json object with the following fields: "explanation" and "criteria_met".
- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true if all of the criteria are met.
- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
# Example 1
For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
```json
{
"explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
"criteria_met": false
}
```
# Example 2
Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
```json
{
"explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
"criteria_met": false
}
```
As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
```json
{
"explanation": "The response is concise, so the criteria was not met.",
"criteria_met": false
}
```
In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
# Final instruction
Return just the json object in markdown format. Do not include any other text in the response.
""".strip()
# Per-example HTML: the generic report template minus the "Correct Answer"
# line (HealthBench has rubrics, not a single answer), plus rubric grades.
HEALTHBENCH_HTML_JINJA = (
    report.HTML_JINJA.replace(
        "<p>Correct Answer: {{ correct_answer }}</p>\n",
        "",
    )
    + "<p>Rubrics with grades: {{ rubric_grades }}</p>"
)
def parse_json_to_dict(json_string: str) -> dict:
    """Parse a JSON object from a grader reply, tolerating ```json fences.

    Returns {} (and prints a diagnostic) when the payload is not valid JSON.
    """
    stripped = json_string.strip()
    # Drop surrounding markdown code-fence markers if the model added them.
    stripped = re.sub(r"^```json\s*|\s*```$", "", stripped)
    try:
        return json.loads(stripped)
    except json.JSONDecodeError as err:
        print(f"JSON decoding failed: {err}")
        return {}
class RubricItem:
    """One rubric criterion with a point value and a list of tags."""

    def __init__(self, criterion: str, points: float, tags: list[str]):
        self.criterion = criterion
        self.points = points
        self.tags = tags

    def __str__(self):
        return f"[{self.points}] {self.criterion}"

    def to_dict(self):
        """Serialize to a plain dict (inverse of from_dict)."""
        return dict(
            criterion=self.criterion,
            points=self.points,
            tags=self.tags,
        )

    @classmethod
    def from_dict(cls, d: dict):
        """Build a RubricItem from a dict with criterion/points/tags keys."""
        return cls(d["criterion"], d["points"], d["tags"])
def calculate_score(
    rubric_items: list[RubricItem], grading_response_list: list[dict]
) -> float | None:
    """Return the fraction of positive rubric points achieved, or None when
    there are no positively-weighted items.

    Negative-point items reduce the achieved sum when their criteria are met,
    but only positive points contribute to the denominator.
    """
    total_possible = sum(
        item.points for item in rubric_items if item.points > 0
    )
    if total_possible == 0:
        # should not happen for overall score, but may happen for tags
        return None
    achieved = 0
    for item, grade in zip(rubric_items, grading_response_list, strict=True):
        if grade["criteria_met"]:
            achieved += item.points
    return achieved / total_possible
def get_usage_dict(response_usage) -> dict[str, int | None]:
if response_usage is None:
return {
"input_tokens": None,
"input_cached_tokens": None,
"output_tokens": None,
"output_reasoning_tokens": None,
"total_tokens": None,
}
return {
"input_tokens": response_usage.input_tokens,
"output_tokens": response_usage.output_tokens,
"total_tokens": response_usage.total_tokens,
"input_cached_tokens": None,
"output_reasoning_tokens": None,
}
# Physician-completion cohorts for HealthBenchEval's physician_completions_mode.
# "has_reference" marks cohorts where physicians saw model reference
# completions, which can themselves be graded (run_reference_completions).
PHYSICIAN_COMPLETION_MODES = {
    "Group 1": {
        "description": "No reference completions were provided to the physicians.",
        "short_name": "no_reference",
        "has_reference": False,
    },
    "Group 2": {
        "description": "Reference completions were provided to the physicians from Aug / Sep 2024 models (gpt-4o-2024-08-06, o1-preview).",
        "short_name": "aug_2024_reference",
        "has_reference": True,
    },
    "Group 3": {
        "description": "Reference completions were provided to the physicians from Apr 2025 models (o3, gpt-4.1).",
        "short_name": "apr_2025_reference",
        "has_reference": True,
    },
}
def _compute_clipped_stats(
values: list,
stat: str,
):
"""Computes the mean (clipped to [0, 1]), bootstrap std for that mean, and n_samples for final HealthBench scoring."""
if stat == "mean":
return np.clip(np.mean(values), 0, 1)
elif stat == "n_samples":
return len(values)
elif stat == "bootstrap_std":
bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)]
bootstrap_means = [
_compute_clipped_stats(list(s), "mean") for s in bootstrap_samples
]
return np.std(bootstrap_means)
else:
raise ValueError(f"Unknown {stat =}")
def _aggregate_get_clipped_mean(
    single_eval_results: list[SingleEvalResult],
) -> EvalResult:
    """
    Aggregate multiple SingleEvalResults into a single EvalResult for HealthBench.
    For each metric, returns the stats in _compute_clipped_stats.
    """
    values_by_name = defaultdict(list)
    htmls, convos, metadata = [], [], []
    for single_result in single_eval_results:
        for metric_name, metric_value in single_result.metrics.items():
            values_by_name[metric_name].append(metric_value)
        if single_result.score is not None:
            values_by_name["score"].append(single_result.score)
        htmls.append(single_result.html)
        convos.append(single_result.convo)
        metadata.append(single_result.example_level_metadata)
    # Expand every metric into mean (bare key), n_samples, and bootstrap_std.
    final_metrics = {}
    for metric_name, values in values_by_name.items():
        for stat in ["mean", "n_samples", "bootstrap_std"]:
            label = metric_name if stat == "mean" else f"{metric_name}:{stat}"
            final_metrics[label] = _compute_clipped_stats(values, stat)
    # "score" is promoted out of the metrics dict onto the result itself.
    return EvalResult(
        score=final_metrics.pop("score", None),
        metrics=final_metrics,
        htmls=htmls,
        convos=convos,
        metadata={"example_level_metadata": metadata},
    )
class HealthBenchEval(Eval):
    """HealthBench: grade responses to health conversations against
    per-example rubrics using a grader model.

    Each example carries a list of RubricItems. Every completion (sampled
    from a model, or pre-recorded physician/reference completions) is graded
    item-by-item by `grader_model`, and the rubric grades are aggregated into
    an overall score plus per-tag metrics.
    """
    def __init__(
        self,
        grader_model: SamplerBase,
        num_examples: int | None = None,
        n_repeats: int = 1,
        # If set, evaluate human completions or reference completions instead of model completions.
        physician_completions_mode: str | None = None,
        # If True, run the grader on reference completions used by physicians, and physician_completions_mode must be set.
        run_reference_completions: bool = False,
        n_threads: int = 120,
        subset_name: Literal["hard", "consensus"] | None = None,
    ):
        if run_reference_completions:
            assert physician_completions_mode is not None, (
                "physician_completions_mode must be provided if run_reference_completions is True"
            )
            assert PHYSICIAN_COMPLETION_MODES[physician_completions_mode][
                "has_reference"
            ], (
                "physician_completions_mode must have reference completions if run_reference_completions is True"
            )
        # Select the dataset file for the requested subset.
        if subset_name == "hard":
            input_path = INPUT_PATH_HARD
        elif subset_name == "consensus":
            input_path = INPUT_PATH_CONSENSUS
        elif subset_name is None:
            input_path = INPUT_PATH
        else:
            assert False, f"Invalid subset name: {subset_name}"
        # The dataset is JSONL; rubric dicts are upgraded to RubricItem objects.
        with bf.BlobFile(input_path, "rb") as f:
            examples = [json.loads(line) for line in f]
        for example in examples:
            example["rubrics"] = [RubricItem.from_dict(d) for d in example["rubrics"]]
        # Fixed seed so subsampling below is reproducible across runs.
        rng = random.Random(0)
        # physician completions mode
        self.physician_completions_mode = physician_completions_mode
        if self.physician_completions_mode is not None:
            assert self.physician_completions_mode in PHYSICIAN_COMPLETION_MODES, (
                f"Invalid physician completions mode: {self.physician_completions_mode}; must be one of {PHYSICIAN_COMPLETION_MODES.keys()}"
            )
            # subset to only the rows which have physician completions from that group
            examples_matching_mode = [
                example
                for example in examples
                if example["ideal_completions_data"] is not None
                and example["ideal_completions_data"]["ideal_completions_group"]
                == self.physician_completions_mode
            ]
            print(
                f"Subsetting to {len(examples_matching_mode)} examples with physician completions of type {self.physician_completions_mode} ({PHYSICIAN_COMPLETION_MODES[self.physician_completions_mode]['description']})"
            )
            examples = []
            if run_reference_completions:
                # Fan out: one eval example per reference completion (four per row).
                for example in examples_matching_mode:
                    for completion in example["ideal_completions_data"][
                        "ideal_completions_ref_completions"
                    ]:
                        new_example = copy.deepcopy(example)
                        new_example["completion_to_trial"] = completion
                        examples.append(new_example)
                assert len(examples) == len(examples_matching_mode) * 4
                print(
                    f"Running four references for each example, for {len(examples)} total"
                )
            else:
                # Grade the physician's own completion for each row.
                for example in examples_matching_mode:
                    example["completion_to_trial"] = example["ideal_completions_data"][
                        "ideal_completion"
                    ]
                    examples.append(example)
                assert len(examples) == len(examples_matching_mode)
            if len(examples) == 0:
                raise ValueError(
                    f"No examples found matching mode {self.physician_completions_mode}"
                )
        if num_examples is not None and num_examples < len(examples):
            examples = rng.sample(
                examples,
                num_examples,
            )
        self.examples = examples * n_repeats
        self.n_threads = n_threads
        self.grader_model = grader_model
    def grade_sample(
        self,
        prompt: list[dict[str, str]],
        response_text: str,
        example_tags: list[str],
        rubric_items: list[RubricItem],
    ) -> tuple[dict, str, list[dict]]:
        """Grade one response against its rubric items with the grader model.

        Returns:
            A tuple of (metrics dict including "overall_score" and per-tag
            scores, a human-readable explanation string, and the rubric items
            annotated with their grades).
        """
        # construct and grade the sample
        convo_with_response = prompt + [dict(content=response_text, role="assistant")]
        def grade_rubric_item(rubric_item: RubricItem) -> dict:
            convo_str = "\n\n".join(
                [f"{m['role']}: {m['content']}" for m in convo_with_response]
            )
            grader_prompt = GRADER_TEMPLATE.replace(
                "<<conversation>>", convo_str
            ).replace("<<rubric_item>>", str(rubric_item))
            messages: MessageList = [dict(content=grader_prompt, role="user")]
            # Retry until the grader returns JSON containing a boolean
            # "criteria_met" field.
            # NOTE(review): this can loop forever if the grader never produces
            # valid output — consider capping the number of retries.
            while True:
                sampler_response = self.grader_model(messages)
                grading_response = sampler_response.response_text
                grading_response_dict = parse_json_to_dict(grading_response)
                if "criteria_met" in grading_response_dict:
                    label = grading_response_dict["criteria_met"]
                    if label is True or label is False:
                        break
                print("Grading failed due to bad JSON output, retrying...")
            return grading_response_dict
        grading_response_list = report.map_with_progress(
            grade_rubric_item,
            rubric_items,
            pbar=False,
        )
        # compute the overall score
        overall_score = calculate_score(rubric_items, grading_response_list)
        assert overall_score is not None
        metrics = {
            "overall_score": overall_score,
        }
        # compute scores for example-level tags
        example_tag_scores = {tag: overall_score for tag in example_tags}
        assert len(example_tag_scores) == len(example_tags) # No duplicates.
        metrics.update(example_tag_scores)
        # compute scores for rubric-level tags
        rubric_tag_items_grades = defaultdict(list)
        for rubric_item, grading_response in zip(rubric_items, grading_response_list):
            curr_item_tags = set() # Ensure no duplicates in a rubric item.
            for tag in rubric_item.tags:
                rubric_tag_items_grades[tag].append((rubric_item, grading_response))
                assert tag not in curr_item_tags
                curr_item_tags.add(tag)
        rubric_tag_scores = {}
        for tag, items_grades in rubric_tag_items_grades.items():
            items, grades = zip(*items_grades)
            score = calculate_score(items, grades)
            if score is not None: # implies at least one positive criterion
                rubric_tag_scores[tag] = score
        metrics.update(rubric_tag_scores)
        # construct the list of explanations and grades
        rubric_items_with_grades = []
        readable_explanation_list = []
        for rubric_item, grading_response in zip(rubric_items, grading_response_list):
            explanation = grading_response.get("explanation", "No explanation provided")
            criteria_met = grading_response["criteria_met"]
            readable_explanation = (
                f"[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}"
            )
            readable_explanation_list.append(readable_explanation)
            rubric_items_with_grades.append(
                {
                    **rubric_item.to_dict(),
                    "criteria_met": criteria_met,
                    "explanation": explanation,
                }
            )
        # Failed criteria sort first so they are easiest to review.
        readable_explanation_list.sort(
            key=lambda x: x.startswith("[False]"), reverse=True
        )
        readable_explanation_str = "\n\n".join(readable_explanation_list)
        readable_explanation_str = f"\n\n{readable_explanation_str}"
        return metrics, readable_explanation_str, rubric_items_with_grades
    def __call__(self, sampler: SamplerBase) -> EvalResult:
        """Run the eval: obtain a completion for every example, grade it,
        and aggregate the per-example metrics into an EvalResult."""
        def fn(row: dict):
            prompt_messages = row["prompt"]
            if self.physician_completions_mode is not None:
                # Pre-recorded physician/reference completion; no sampling.
                response_text = row["completion_to_trial"]
                response_usage = None
                actual_queried_prompt_messages = prompt_messages
            else:
                sampler_response = sampler(prompt_messages)
                response_text = sampler_response.response_text
                response_dict = sampler_response.response_metadata
                actual_queried_prompt_messages = (
                    sampler_response.actual_queried_message_list
                )
                response_usage = response_dict.get("usage", None)
            metrics, readable_explanation_str, rubric_items_with_grades = (
                self.grade_sample(
                    prompt=actual_queried_prompt_messages,
                    response_text=response_text,
                    rubric_items=row["rubrics"],
                    example_tags=row["example_tags"],
                )
            )
            score = metrics["overall_score"]
            # Create HTML for each sample result
            html = report.jinja_env.from_string(
                HEALTHBENCH_HTML_JINJA.replace(
                    "{{ rubric_grades }}",
                    readable_explanation_str.replace("\n", "<br>"),
                )
            ).render(
                prompt_messages=actual_queried_prompt_messages,
                next_message=dict(content=response_text, role="assistant"),
                score=metrics["overall_score"],
                extracted_answer=response_text,
            )
            convo = actual_queried_prompt_messages + [
                dict(content=response_text, role="assistant")
            ]
            return SingleEvalResult(
                html=html,
                score=score,
                convo=convo,
                metrics=metrics,
                example_level_metadata={
                    "score": score,
                    "usage": get_usage_dict(response_usage),
                    "rubric_items": rubric_items_with_grades,
                    "prompt": actual_queried_prompt_messages,
                    "completion": [dict(content=response_text, role="assistant")],
                    "prompt_id": row["prompt_id"],
                    # Deterministic ID derived from prompt_id + completion text.
                    "completion_id": hashlib.sha256(
                        (row["prompt_id"] + response_text).encode("utf-8")
                    ).hexdigest(),
                },
            )
        results = report.map_with_progress(
            fn,
            self.examples,
            num_threads=self.n_threads,
            pbar=True,
        )
        final_metrics = _aggregate_get_clipped_mean(results)
        return final_metrics
def main():
    """CLI entry point for running HealthBench on physician completions."""
    arg_parser = argparse.ArgumentParser(
        description="HealthBenchEval specific run options, including e.g., running the eval on physician completions rows only."
    )
    arg_parser.add_argument(
        "--run_mode",
        type=str,
        choices=["physician_completions", "physician_completion_references"],
    )
    arg_parser.add_argument("--examples", type=int, help="Number of examples to run")
    arg_parser.add_argument(
        "--n-threads",
        type=int,
        default=120,
        help="Number of threads to run",
    )
    args = arg_parser.parse_args()

    # Each run mode maps to whether reference completions should be graded.
    mode_to_reference_flag = {
        "physician_completions": False,
        "physician_completion_references": True,
    }
    if args.run_mode not in mode_to_reference_flag:
        raise ValueError(f"Invalid run mode: {args.run_mode}")
    physician_completions_main(
        run_reference_completions=mode_to_reference_flag[args.run_mode],
        num_examples=args.examples,
        n_threads=args.n_threads or 1,
    )
def physician_completions_main(
    run_reference_completions: bool = False,
    num_examples: int | None = None,
    n_threads: int = 120,
):
    """Run HealthBench over physician (or reference) completions for every
    physician-completion group and write HTML/JSON reports under /tmp.

    Args:
        run_reference_completions: if True, grade the reference completions
            that were shown to physicians; groups without references are
            skipped. Otherwise grade the physicians' own completions.
        num_examples: optional cap on the number of examples per group.
        n_threads: thread count passed through to HealthBenchEval.

    Returns:
        A list of {"eval_name", "model_name", "metric"} dicts, one per group.
    """
    now = datetime.now()
    date_str = now.strftime("%Y%m%d_%H%M")
    grading_sampler = ChatCompletionsSampler(
        model="gpt-4.1-2025-04-14",
        system_message=OPENAI_SYSTEM_MESSAGE_API,
        max_tokens=2048,
        base_url="https://api.openai.com/v1",
    )
    # The dummy sampler is never invoked: physician_completions_mode is always
    # set below, so completions come from the dataset rather than a model.
    dummy_sampler = SamplerBase()

    merge_metrics = []
    for pc_mode in PHYSICIAN_COMPLETION_MODES.keys():
        if (
            run_reference_completions
            and not PHYSICIAN_COMPLETION_MODES[pc_mode]["has_reference"]
        ):
            continue

        # run
        # Named `health_eval` (not `eval`) to avoid shadowing the builtin.
        health_eval = HealthBenchEval(
            grader_model=grading_sampler,
            physician_completions_mode=pc_mode,
            run_reference_completions=run_reference_completions,
            num_examples=num_examples,
            n_threads=n_threads,
        )
        result = health_eval(dummy_sampler)

        # report
        parsable_mode = PHYSICIAN_COMPLETION_MODES[pc_mode]["short_name"]
        if run_reference_completions:
            file_stem = f"healthbench_{parsable_mode}_referencecompletions_{date_str}"
        else:
            file_stem = f"healthbench_{parsable_mode}_humanbaseline_{date_str}"
        report_filename = Path(f"/tmp/{file_stem}.html")
        report_filename.write_text(report.make_report(result))
        print(f"Report saved to {report_filename}")

        # metrics
        assert result.metrics is not None
        metrics = result.metrics
        result_filename = Path(f"/tmp/{file_stem}.json")
        result_filename.write_text(json.dumps(metrics))
        print(f"Results saved to {result_filename}")

        full_result_dict = {
            "score": result.score,
            "metrics": result.metrics,
            "htmls": result.htmls,
            "convos": result.convos,
            "metadata": result.metadata,
        }
        full_result_filename = Path(f"/tmp/{file_stem}_allresults.json")
        full_result_filename.write_text(json.dumps(full_result_dict, indent=2))
        print(f"All results saved to {full_result_filename}")

        # metrics df
        merge_metrics.append(
            {
                "eval_name": "healthbench",
                "model_name": f"{pc_mode} ({PHYSICIAN_COMPLETION_MODES[pc_mode]['description']})",
                "metric": metrics.get("overall_score", None),
            }
        )

    print("\nAll results: ")
    print(merge_metrics)
    return merge_metrics
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/healthbench_eval.py",
"license": "Apache License 2.0",
"lines": 531,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/evals/report.py | import os
from collections import defaultdict
from multiprocessing.pool import ThreadPool
from typing import Any, Callable
import jinja2
import numpy as np
from tqdm import tqdm
from .types import EvalResult, Message, SingleEvalResult
# Jinja template for rendering one evaluated sample (prompt conversation,
# sampled message, and scoring results) as an HTML fragment in reports.
HTML_JINJA = """
<h3>Prompt conversation</h3>
{% for message in prompt_messages %}
{{ message_to_html(message) | safe }}
{% endfor %}
<h3>Sampled message</h3>
{{ message_to_html(next_message) | safe }}
<h3>Results</h3>
<p>Correct Answer: {{ correct_answer }}</p>
<p>Extracted Answer: {{ extracted_answer }}</p>
<p>Score: {{ score }}</p>
"""
def _compute_stat(values: list, stat: str):
if stat == "mean":
return np.mean(values)
elif stat == "std":
return np.std(values)
elif stat == "min":
return np.min(values)
elif stat == "max":
return np.max(values)
elif stat == "n_samples":
return len(values)
elif stat == "bootstrap_std":
return np.std(
[np.mean(np.random.choice(values, len(values))) for _ in range(1000)]
)
else:
raise ValueError(f"Unknown {stat =}")
def aggregate_results(
    single_eval_results: list[SingleEvalResult],
    default_stats: tuple[str, ...] = ("mean", "std"),
    name2stats: dict[str, tuple[str]] | None = None,
) -> EvalResult:
    """
    Aggregate results from multiple evaluations into a single EvalResult.

    Each metric is summarized with the stats requested in *name2stats* for
    that metric name (falling back to *default_stats*). Per-sample scores are
    aggregated under the reserved "score" key, which becomes the top-line
    score of the returned EvalResult.
    """
    stats_by_name = name2stats or {}
    collected = defaultdict(list)
    htmls = []
    convos = []
    metadata = []
    for result in single_eval_results:
        for metric_name, metric_value in result.metrics.items():
            collected[metric_name].append(metric_value)
        if result.score is not None:
            collected["score"].append(result.score)
        htmls.append(result.html)
        convos.append(result.convo)
        metadata.append(result.example_level_metadata)
    final_metrics = {}
    for metric_name, values in collected.items():
        for stat in stats_by_name.get(metric_name, default_stats):
            # "mean" keeps the bare metric name; other stats get a suffix.
            key = metric_name if stat == "mean" else f"{metric_name}:{stat}"
            final_metrics[key] = _compute_stat(values, stat)
    return EvalResult(
        score=final_metrics.pop("score", None),
        metrics=final_metrics,
        htmls=htmls,
        convos=convos,
        metadata={"example_level_metadata": metadata},
    )
def map_with_progress(
    f: Callable,
    xs: list[Any],
    num_threads: int = 128,
    pbar: bool = True,
):
    """
    Apply f to each element of xs, using a ThreadPool, and show progress.

    Results are collected via imap_unordered, so their order is NOT
    guaranteed to match the order of xs. Setting the ``debug`` environment
    variable runs the map serially in-process instead.
    """
    pbar_fn = tqdm if pbar else lambda x, *args, **kwargs: x

    # ThreadPool(0) raises ValueError; an empty input trivially maps to [].
    if not xs:
        return []

    if os.getenv("debug"):
        return list(map(f, pbar_fn(xs, total=len(xs))))
    else:
        with ThreadPool(min(num_threads, len(xs))) as pool:
            return list(pbar_fn(pool.imap_unordered(f, xs), total=len(xs)))
jinja_env = jinja2.Environment(
loader=jinja2.BaseLoader(),
undefined=jinja2.StrictUndefined,
autoescape=jinja2.select_autoescape(["html", "xml"]),
)
_message_template = """
<div class="message {{ role }}">
<div class="role">
{{ role }}
{% if variant %}<span class="variant">({{ variant }})</span>{% endif %}
</div>
<div class="content">
<pre>{{ content }}</pre>
</div>
</div>
"""
def message_to_html(message: Message) -> str:
    """Render a single conversation message as an HTML <div> snippet."""
    variant = message.get("variant", None)
    template = jinja_env.from_string(_message_template)
    return template.render(
        role=message["role"],
        content=message["content"],
        variant=variant,
    )
# Register message_to_html so templates can call it directly.
jinja_env.globals["message_to_html"] = message_to_html
_report_template = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
.message {
padding: 8px 16px;
margin-bottom: 8px;
border-radius: 4px;
}
.message.user {
background-color: #B2DFDB;
color: #00695C;
}
.message.assistant {
background-color: #B39DDB;
color: #4527A0;
}
.message.system {
background-color: #EEEEEE;
color: #212121;
}
.role {
font-weight: bold;
margin-bottom: 4px;
}
.variant {
color: #795548;
}
table, th, td {
border: 1px solid black;
}
pre {
white-space: pre-wrap;
}
</style>
</head>
<body>
{% if metrics %}
<h1>Metrics</h1>
<table>
<tr>
<th>Metric</th>
<th>Value</th>
</tr>
<tr>
<td><b>Score</b></td>
<td>{{ score | float | round(3) }}</td>
</tr>
{% for name, value in metrics.items() %}
<tr>
<td>{{ name }}</td>
<td>{{ value }}</td>
</tr>
{% endfor %}
</table>
{% endif %}
<h1>Examples</h1>
{% for html in htmls %}
{{ html | safe }}
<hr>
{% endfor %}
</body>
</html>
"""
def make_report(eval_result: EvalResult) -> str:
    """
    Create a standalone HTML report from an EvalResult.
    """
    template = jinja_env.from_string(_report_template)
    return template.render(
        score=eval_result.score,
        metrics=eval_result.metrics,
        htmls=eval_result.htmls,
    )
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/report.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/gpt-oss:gpt_oss/evals/responses_sampler.py | import time
from typing import Any
import openai
from openai import OpenAI
from .types import MessageList, SamplerBase, SamplerResponse
class ResponsesSampler(SamplerBase):
    """
    Sample from OpenAI's responses API.

    Retries indefinitely with exponential backoff on generic failures;
    returns an empty response on BadRequestError.
    """
    def __init__(
        self,
        model: str,
        developer_message: str | None = None,
        temperature: float = 1.0,
        max_tokens: int = 131_072,
        reasoning_model: bool = False,
        reasoning_effort: str | None = None,
        base_url: str = "http://localhost:8000/v1",
    ):
        # 24h client timeout: long generations on large prompts can be slow.
        self.client = OpenAI(base_url=base_url, timeout=24*60*60)
        self.model = model
        self.developer_message = developer_message
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.image_format = "url"
        self.reasoning_model = reasoning_model
        self.reasoning_effort = reasoning_effort
    def _pack_message(self, role: str, content: Any) -> dict[str, Any]:
        """Wrap *content* in the {"role", "content"} message shape."""
        return {"role": role, "content": content}
    def __call__(self, message_list: MessageList) -> SamplerResponse:
        """Send *message_list* to the responses API and return the reply.

        A configured developer message is prepended to the conversation
        before the request is made.
        """
        if self.developer_message:
            message_list = [
                self._pack_message("developer", self.developer_message)
            ] + message_list
        trial = 0
        while True:
            try:
                request_kwargs = {
                    "model": self.model,
                    "input": message_list,
                    "temperature": self.temperature,
                    "max_output_tokens": self.max_tokens,
                }
                if self.reasoning_model:
                    # None clears the effort when no explicit level is set.
                    request_kwargs["reasoning"] = (
                        {"effort": self.reasoning_effort} if self.reasoning_effort else None
                    )
                response = self.client.responses.create(**request_kwargs)
                # Append the model's output messages to the transcript that
                # is returned as actual_queried_message_list.
                for output in response.output:
                    if hasattr(output, "text"):
                        message_list.append(self._pack_message(getattr(output, "role", "assistant"), output.text))
                    elif hasattr(output, "content"):
                        for c in output.content:
                            # NOTE(review): this branch discards content parts
                            # without recording them — looks like dead code;
                            # confirm whether content items should be appended.
                            pass
                return SamplerResponse(
                    response_text=response.output_text,
                    response_metadata={"usage": response.usage},
                    actual_queried_message_list=message_list,
                )
            except openai.BadRequestError as e:
                # Unrecoverable request error: give up and return empty text.
                print("Bad Request Error", e)
                return SamplerResponse(
                    response_text="",
                    response_metadata={"usage": None},
                    actual_queried_message_list=message_list,
                )
            except Exception as e:
                exception_backoff = 2**trial # exponential backoff
                print(
                    f"Rate limit exception so wait and retry {trial} after {exception_backoff} sec",
                    e,
                )
                time.sleep(exception_backoff)
                trial += 1
                # NOTE(review): retries forever — there is no cap on `trial`.
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/responses_sampler.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/evals/types.py | from dataclasses import dataclass, field
from typing import Any, Literal, overload
# A single chat message; a MessageList is a full conversation transcript.
Message = dict[str, Any]  # keys role, content
MessageList = list[Message]
@dataclass
class SamplerResponse:
    """
    Response from a sampler.
    """
    # Final text produced by the model.
    response_text: str
    # The message list actually sent to the model, including any system or
    # developer messages the sampler injected.
    actual_queried_message_list: MessageList
    # Provider-specific extras, e.g. {"usage": ...}.
    response_metadata: dict[str, Any]
class SamplerBase:
    """
    Base class for defining a sampling model, which can be evaluated,
    or used as part of the grading process.
    """
    def __call__(
        self,
        message_list: MessageList,
    ) -> SamplerResponse:
        """Produce a completion for *message_list*; subclasses must override."""
        raise NotImplementedError
@dataclass
class EvalResult:
    """
    Result of running an evaluation (usually consisting of many samples)
    """
    score: float | None  # top-line metric
    metrics: dict[str, float] | None  # other metrics
    htmls: list[str]  # strings of valid HTML
    convos: list[MessageList]  # sampled conversations
    metadata: dict[str, Any] | None  # extra data, e.g. rubric scores
@dataclass
class SingleEvalResult:
    """
    Result of evaluating a single sample
    """
    score: float | None
    metrics: dict[str, float] = field(default_factory=dict)
    html: str | None = None
    convo: MessageList | None = None  # sampled conversation
    example_level_metadata: dict[str, Any] | None = (
        None  # extra per-example data, e.g. rubric scores
    )
class Eval:
    """
    Base class for defining an evaluation.
    """
    def __call__(self, sampler: SamplerBase) -> EvalResult:
        """Run the evaluation against *sampler*; subclasses must override."""
        raise NotImplementedError
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/evals/types.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/generate.py | # Model parallel inference
# Note: This script is for demonstration purposes only. It is not designed for production use.
# See gpt_oss.chat for a more complete example with the Harmony parser.
# torchrun --nproc-per-node=4 -m gpt_oss.generate -p "why did the chicken cross the road?" model/
import argparse
from gpt_oss.tokenizer import get_tokenizer
def main(args):
    """Build the requested inference backend, then stream tokens generated
    from ``args.prompt``, printing each token with its logprob."""
    # Backend imports are deferred so only the selected stack is loaded.
    if args.backend == "torch":
        from gpt_oss.torch.utils import init_distributed
        from gpt_oss.torch.model import TokenGenerator as TorchGenerator
        device = init_distributed()
        generator = TorchGenerator(args.checkpoint, device=device)
    elif args.backend == "triton":
        from gpt_oss.torch.utils import init_distributed
        from gpt_oss.triton.model import TokenGenerator as TritonGenerator
        device = init_distributed()
        generator = TritonGenerator(args.checkpoint, context=args.context_length, device=device)
    elif args.backend == "vllm":
        from gpt_oss.vllm.token_generator import TokenGenerator as VLLMGenerator
        generator = VLLMGenerator(args.checkpoint, tensor_parallel_size=args.tensor_parallel_size)
    else:
        raise ValueError(f"Invalid backend: {args.backend}")

    tokenizer = get_tokenizer()
    tokens = tokenizer.encode(args.prompt)
    # A limit of 0 means "no cap" for the generator.
    max_tokens = None if args.limit == 0 else args.limit
    for token, logprob in generator.generate(tokens, stop_tokens=[tokenizer.eot_token], temperature=args.temperature, max_tokens=max_tokens, return_logprobs=True):
        tokens.append(token)
        token_text = tokenizer.decode([token])
        print(f"Generated token: {repr(token_text)}, logprob: {logprob}")
# CLI: parse generation options and run the example.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Text generation example")
    parser.add_argument(
        "checkpoint",
        metavar="FILE",
        type=str,
        help="Path to the SafeTensors checkpoint",
    )
    parser.add_argument(
        "-p",
        "--prompt",
        metavar="PROMPT",
        type=str,
        default="How are you?",
        help="LLM prompt",
    )
    parser.add_argument(
        "-t",
        "--temperature",
        metavar="TEMP",
        type=float,
        default=0.0,
        help="Sampling temperature",
    )
    parser.add_argument(
        "-l",
        "--limit",
        metavar="LIMIT",
        type=int,
        default=0,
        help="Limit on the number of tokens (0 to disable)",
    )
    parser.add_argument(
        "-b",
        "--backend",
        metavar="BACKEND",
        type=str,
        default="torch",
        choices=["triton", "torch", "vllm"],
        help="Inference backend",
    )
    parser.add_argument(
        "--tensor-parallel-size",
        type=int,
        default=2,
        help="Tensor parallel size for vLLM backend",
    )
    parser.add_argument(
        "--context-length",
        type=int,
        default=4096,
        help="Context length for Triton backend",
    )
    args = parser.parse_args()
    main(args)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/generate.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/metal/examples/chat.py | #!/usr/bin/env python
import argparse
import sys
from datetime import date
from gpt_oss.metal import Context, Model
# Default system prompt in the gpt-oss message format.
DEFAULT_PROMPT = f"""You are ChatGPT, a large language model trained by OpenAI.
Knowledge cutoff: 2024-06
Current date: {date.today().isoformat()}
reasoning effort high
# Valid channels: analysis, final. Channel must be included for every message."""
# Module-level CLI definition, referenced by main() below.
parser = argparse.ArgumentParser(description="Chat with gpt-oss", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("model", metavar="PATH", type=str, help="Path to gpt-oss model in Metal inference format")
parser.add_argument("--prompt", type=str, default=DEFAULT_PROMPT, help="System prompt")
parser.add_argument(
    "--context-length", type=int, default=0, help="The maximum context length"
)
parser.add_argument(
    "--temperature", type=float, default=1.0, help="Sampling temperature"
)
parser.add_argument(
    "--seed", type=int, default=0, help="Sampling seed"
)
# ANSI escape codes used to style terminal output.
GREY = "\33[90m"
BOLD = "\33[1m"
RESET = "\33[0m"
def main(args):
    """Interactive chat REPL: reads user messages from stdin and streams the
    assistant's sampled tokens, dimming the analysis channel."""
    options = parser.parse_args(args)
    model = Model(options.model)
    tokenizer = model.tokenizer
    # Special tokens delimiting <|start|>role<|channel|>ch<|message|>content
    # segments in the sampled stream.
    start_token = tokenizer.encode_special_token("<|start|>")
    message_token = tokenizer.encode_special_token("<|message|>")
    end_token = tokenizer.encode_special_token("<|end|>")
    return_token = tokenizer.encode_special_token("<|return|>")
    channel_token = tokenizer.encode_special_token("<|channel|>")
    context = Context(model, context_length=options.context_length)
    # Prime the context with the system message.
    context.append(start_token)
    context.append("system")
    context.append(message_token)
    context.append(options.prompt)
    context.append(end_token)
    while True:
        # Append the next user turn.
        context.append(start_token)
        context.append("user")
        context.append(message_token)
        message = input(f"{BOLD}User:{RESET} ").rstrip()
        context.append(message)
        context.append(end_token)
        print(f"{BOLD}Assistant:{RESET} {GREY}", end="", flush=True)
        # Start the assistant turn at the channel marker so the model fills
        # in the channel name first.
        context.append(start_token)
        context.append("assistant")
        context.append(channel_token)
        # State machine over the sampled stream: track whether the current
        # text belongs to a role header, a channel name, or message content.
        inside_start_block = True
        inside_channel_block = True
        role = "assistant"
        channel = ""
        while True:
            token = context.sample(
                temperature=options.temperature,
                seed=options.seed,
            )
            context.append(token)
            if token == return_token:
                # End of the assistant's final answer; back to the user.
                print(flush=True)
                break
            elif token == start_token:
                inside_start_block = True
                role = ""
                channel = ""
            elif token == message_token:
                inside_start_block = False
                inside_channel_block = False
                if channel == "analysis":
                    # Dim chain-of-thought output.
                    print(f"{GREY}", end="", flush=True)
            elif token == end_token:
                print(f"{RESET}", flush=True)
            elif token == channel_token:
                inside_channel_block = True
            elif token < tokenizer.num_text_tokens:
                # Ordinary text token: route it to the current segment.
                if inside_channel_block:
                    channel += str(tokenizer.decode(token), encoding="utf-8")
                elif inside_start_block:
                    role += str(tokenizer.decode(token), encoding="utf-8")
                else:
                    sys.stdout.buffer.write(tokenizer.decode(token))
                    sys.stdout.buffer.flush()
# CLI entry point; forwards argv (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/metal/examples/chat.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/metal/examples/generate.py | #!/usr/bin/env python
import argparse
import sys
from gpt_oss.metal import Context, Model
# Module-level CLI definition, referenced by main() below.
parser = argparse.ArgumentParser(description='Chat with gpt-oss', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('model', metavar='PATH', type=str, help='Path to gpt-oss checkpoint')
parser.add_argument('-p', '--prompt', type=str, required=True, help='Prompt')
parser.add_argument('-l', '--limit', type=int, default=100, help='Number of tokens to generate')
parser.add_argument('--context-length', type=int, default=0, help='The maximum context length')
def main(args):
    """Prime the context with the prompt, then sample and print up to
    ``--limit`` new tokens."""
    options = parser.parse_args(args)
    model = Model(options.model)
    context = Context(model, context_length=options.context_length)
    context.append(options.prompt)
    print(context.tokens)
    tokenizer = model.tokenizer
    num_prompt_tokens = context.num_tokens
    # Keep sampling until `limit` tokens have been generated past the prompt.
    while True:
        if context.num_tokens - num_prompt_tokens >= options.limit:
            break
        token = context.sample()
        context.append(token)
        print(str(tokenizer.decode(token), encoding="utf-8"), end='', flush=True)
# CLI entry point; forwards argv (minus the program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/metal/examples/generate.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/metal/scripts/create-local-model.py | import argparse
import os
import math
import sys
import json
import itertools
import struct
from uuid import UUID
import tiktoken
import torch
from safetensors import safe_open
from tqdm import tqdm
from openai_harmony import load_harmony_encoding, HarmonyEncodingName
# CLI definition for the checkpoint conversion tool.
parser = argparse.ArgumentParser(prog='create-local-model.py', description='Convert a checkpoint directory to a local model file')
parser.add_argument('-s', '--src', metavar='DIR', type=str, required=True, help='Path to the input checkpoint directory')
parser.add_argument('-d', '--dst', metavar='FILE', type=str, required=True, help='Path to the output model file')
o200k_base = tiktoken.get_encoding("o200k_base")
harmony_encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
# o200k_base extended with the gpt-oss/harmony special tokens.
o200k_gptoss = tiktoken.Encoding(
    name="o200k_gptoss",
    pat_str=o200k_base._pat_str,
    mergeable_ranks=o200k_base._mergeable_ranks,
    special_tokens={
        "<|reversed199998|>": 199998, # unused
        "<|endoftext|>": 199999,
        "<|untrusted|>": 200000,
        "<|endofuntrusted|>": 200001,
        "<|return|>": 200002,
        "<|constrain|>": 200003,
        "<|reversed200004|>": 200004, # unused
        "<|channel|>": 200005,
        "<|start|>": 200006,
        "<|end|>": 200007,
        "<|message|>": 200008,
        "<|reversed200008|>": 200008, # unused
        "<|reversed200009|>": 200009, # unused
        "<|reversed200010|>": 200010, # unused
        "<|reversed200011|>": 200011, # unused
        "<|call|>": 200012,
        "<|refusal|>": 200013,
    }
)
# Magic bytes identifying the output format: "GPT-OSS v1.0" + a zero uint32.
FILE_MAGIC = struct.pack('ccccccccccccI', b'G', b'P', b'T', b'-', b'O', b'S', b'S', b' ', b'v', b'1', b'.', b'0', 0)
# Each special token is identified in the file by a fixed 16-byte UUID.
SPECIAL_TOKEN_UUID = {
    '<|start|>': UUID('55a77c2f-8a01-4c54-8ac2-313bfc7e208d').bytes,
    '<|message|>': UUID('16e40431-f47f-4b22-b59b-8b278fc30a54').bytes,
    '<|end|>': UUID('fcac2f6d-4705-4f6b-b228-642accac7238').bytes,
    '<|return|>': UUID('f799ff69-1992-43c4-a3d8-d831f475dc75').bytes,
    '<|refusal|>': UUID('e15ba702-28c4-4292-ab8f-ffa434709128').bytes,
    '<|constrain|>': UUID('c0bb14c7-6022-49da-ad08-792d67e8b470').bytes,
    '<|channel|>': UUID('fd3dda11-c8ab-4033-876e-d93deb172c93').bytes,
    '<|call|>': UUID('1220f796-e388-4de5-b487-fe2eb5fe03c0').bytes,
    '<|untrusted|>': UUID('07d7da55-b346-4cff-8b37-7cefacf8a3e8').bytes,
    '<|end_untrusted|>': UUID('f265bd9c-c717-469e-a447-920687d65d90').bytes,
}
# Special tokens that are written into the output tokenizer section.
INCLUDE_SPECIAL_TOKENS = [
    "<|start|>",
    "<|message|>",
    "<|end|>",
    "<|return|>",
    "<|refusal|>",
    "<|constrain|>",
    "<|channel|>",
    "<|call|>",
    "<|untrusted|>",
    "<|end_untrusted|>",
]
# UUIDs identifying the model family, weight layout, and tokenizer sections.
GPTOSS_MODEL_UUID = UUID('df52dc86-1789-4ed0-a295-66f10508145b').bytes
APPLE_GPU_LAYOUT_UUID = UUID('229177a8-5775-4268-bfd8-d588b351c56d').bytes
TIKTOKEN_TOKENIZER_UUID = UUID('7401aded-2a95-40cb-b782-9ccebaafe72b').bytes
UE8_OFFSET = 14 # bias to MXFP4 block scales
def write_file_header(f):
    """Write the GPT-OSS v1.0 magic bytes that identify the file format."""
    f.write(FILE_MAGIC)
def write_tokenizer_header(f,
                           num_special_tokens: int,
                           num_text_tokens: int,
                           regex_size: int,
                           tokens_size: int):
    """Write the tokenizer section header: the tiktoken tokenizer UUID
    followed by four little-endian uint32 size fields."""
    f.write(TIKTOKEN_TOKENIZER_UUID)
    for field in (num_special_tokens, num_text_tokens, regex_size, tokens_size):
        f.write(struct.pack('<I', field))
def write_model_header(f,
                       context_length : int,
                       num_blocks : int,
                       num_experts : int,
                       num_active_experts : int,
                       embedding_dim : int,
                       mlp_dim : int,
                       swiglu_limit : float,
                       head_dim: int,
                       num_heads : int,
                       num_kv_heads : int,
                       attention_window : int,
                       rope_theta : float,
                       interpolation_scale : float,
                       yarn_offset : float,
                       yarn_scale : float,
                       yarn_multiplier : float,
                       rmsnorm_epsilon : float):
    """Write the model section header: the GPT-OSS model UUID, the hyper-
    parameter fields (little-endian uint32/float32, in fixed order), and the
    Apple GPU weight-layout UUID."""
    f.write(GPTOSS_MODEL_UUID)
    # (format, value) pairs in the exact on-disk field order.
    header_fields = (
        ('<I', context_length),
        ('<I', num_blocks),
        ('<I', num_experts),
        ('<I', num_active_experts),
        ('<I', embedding_dim),
        ('<I', mlp_dim),
        ('<f', swiglu_limit),
        ('<I', head_dim),
        ('<I', num_heads),
        ('<I', num_kv_heads),
        ('<I', attention_window),
        ('<f', rope_theta),
        ('<f', interpolation_scale),
        ('<f', yarn_offset),
        ('<f', yarn_scale),
        ('<f', yarn_multiplier),
        ('<f', rmsnorm_epsilon),
    )
    for fmt, value in header_fields:
        f.write(struct.pack(fmt, value))
    f.write(APPLE_GPU_LAYOUT_UUID)
def write_padding(out_file, alignment_multiple=16384):
    """Pad *out_file* with zero bytes so its position becomes a multiple of
    *alignment_multiple*; a no-op when already aligned."""
    position = out_file.tell()
    pad_len = -position % alignment_multiple
    if pad_len:
        out_file.write(bytes(pad_len))
def write_embedding_weight(out_file, weight):
    """Write an embedding weight tensor as raw bytes, 16-byte aligned.
    Only float8_e4m3fn and bfloat16 tensors are accepted."""
    write_padding(out_file, alignment_multiple=16)
    assert weight.dtype == torch.float8_e4m3fn or weight.dtype == torch.bfloat16
    out_file.write(weight.view(torch.uint8).numpy().tobytes())
def write_rmsnorm_gain(out_file, gain):
    """Write an RMSNorm gain tensor (bf16 only) as raw bytes, 16-byte aligned."""
    write_padding(out_file, alignment_multiple=16)
    assert gain.dtype == torch.bfloat16
    raw = gain.view(torch.uint8).numpy().tobytes()
    out_file.write(raw)
def write_attn_sink(out_file, sink):
    """Write an attention-sink tensor (bf16 only) as raw bytes, 16-byte aligned."""
    write_padding(out_file, alignment_multiple=16)
    assert sink.dtype == torch.bfloat16
    raw = sink.view(torch.uint8).numpy().tobytes()
    out_file.write(raw)
def write_linear_weight(out_file, *args):
    """Write one or more tensors of a linear layer back-to-back.

    A single 16-byte alignment pad is emitted before the first tensor;
    each tensor's bytes are dumped verbatim in argument order.
    """
    write_padding(out_file, alignment_multiple=16)
    for tensor in args:
        raw = tensor.view(torch.uint8).numpy().tobytes()
        out_file.write(raw)
def main(args):
    """Convert a gpt-oss safetensors checkpoint into one binary model file.

    Reads ``config.json`` and every ``*.safetensors`` shard under
    ``options.src``, then writes file/model/tokenizer headers, the
    tokenizer tables, dense weights, and finally the per-expert MoE
    weights to ``options.dst``. The write order and alignment here define
    the on-disk format consumed by the Metal runtime.
    """
    options = parser.parse_args(args)
    with open(os.path.join(options.src, "config.json"), "r") as f:
        config = json.load(f)
    # Model hyperparameters from the checkpoint config.
    num_blocks = config["num_hidden_layers"]
    num_experts = config["num_experts"]
    num_active_experts = 4
    num_q_heads = config["num_attention_heads"]
    num_kv_heads = config["num_key_value_heads"]
    head_dim = config["head_dim"]
    embedding_dim = config["hidden_size"]
    mlp_dim = config["intermediate_size"]
    swiglu_limit = config.get("swiglu_limit", 7.0)
    rope_theta = config["rope_theta"]
    attention_window = config["sliding_window"]
    initial_context_length = config["initial_context_length"]
    rope_scaling_factor = config["rope_scaling_factor"]
    rope_ntk_alpha = config["rope_ntk_alpha"]
    rope_ntk_beta = config["rope_ntk_beta"]
    # Pre-compute tokenizer payload size so the header can be written first.
    tokens_size = 0
    num_text_tokens = 0
    # First add all text tokens
    for t in range(o200k_gptoss.n_vocab):
        if not harmony_encoding.is_special_token(t):
            token_bytes = o200k_gptoss.decode_single_token_bytes(t)
            assert len(token_bytes) > 0
            tokens_size += len(token_bytes) + 2  # uint16_t string length + string data
            num_text_tokens += 1
    # Then add all special tokens
    # NOTE(review): hard-coded vocabulary cutoff (token ids 0..200013
    # inclusive) — presumably the last included special token; confirm
    # against the o200k_gptoss vocabulary if the tokenizer changes.
    num_included_tokens = 200013 + 1
    print(f"Tokenizer: {num_included_tokens} tokens")
    # Read from all files ending with .safetensors in the checkpoint directory
    safetensor_files = [
        os.path.join(options.src, fname)
        for fname in os.listdir(options.src)
        if fname.endswith(".safetensors")
    ]
    # Build a mapping from tensor name to filepath
    tensor_name_to_file = {}
    for safetensor_file in safetensor_files:
        with safe_open(safetensor_file, framework="pt", device="cpu") as src:
            for key in src.keys():
                tensor_name_to_file[key] = safetensor_file
    def get_tensor(name):
        # Lazily load a single tensor from whichever shard contains it.
        with safe_open(tensor_name_to_file[name], framework="pt", device="cpu") as src:
            return src.get_tensor(name)
    with open(options.dst, "wb") as dst:
        write_file_header(dst)
        # YaRN interpolation band derived from the NTK alpha/beta knobs.
        yarn_low = (
            head_dim / 2
            * math.log(initial_context_length / (rope_ntk_beta * 2 * math.pi))
            / math.log(rope_theta)
        )
        yarn_high = (
            head_dim / 2
            * math.log(initial_context_length / (rope_ntk_alpha * 2 * math.pi))
            / math.log(rope_theta)
        )
        write_model_header(dst,
                           context_length=int(initial_context_length * rope_scaling_factor),
                           num_blocks=num_blocks,
                           num_experts=num_experts,
                           num_active_experts=num_active_experts,
                           embedding_dim=embedding_dim,
                           mlp_dim=mlp_dim,
                           swiglu_limit=swiglu_limit,
                           head_dim=head_dim,
                           num_heads=num_q_heads,
                           num_kv_heads=num_kv_heads,
                           attention_window=attention_window,
                           rope_theta=rope_theta,
                           interpolation_scale=1.0 / rope_scaling_factor,
                           yarn_offset=-yarn_low / (yarn_high - yarn_low),
                           yarn_scale=1.0 / (yarn_high - yarn_low),
                           yarn_multiplier=0.1 * math.log(rope_scaling_factor) + 1.0,
                           rmsnorm_epsilon=1.0e-5)
        write_tokenizer_header(dst,
                               num_special_tokens=num_included_tokens - num_text_tokens,
                               num_text_tokens=num_text_tokens,
                               regex_size=len(o200k_gptoss._pat_str.encode("ascii")) + 1,
                               tokens_size=tokens_size)
        ### Tokenizer
        # Special tokens
        for token_idx in range(num_text_tokens, num_included_tokens):
            token = o200k_gptoss.decode_single_token_bytes(token_idx).decode('ascii')
            if token in INCLUDE_SPECIAL_TOKENS:
                dst.write(SPECIAL_TOKEN_UUID[token])
            else:
                # Excluded special tokens get a 16-byte all-zero placeholder.
                dst.write(bytes(16))
        # Regex
        dst.write(o200k_gptoss._pat_str.encode("ascii"))
        dst.write(struct.pack('B', 0))
        # Text tokens
        tokenizer_bytes_written = 0
        for t in range(num_text_tokens):
            token_bytes = o200k_gptoss.decode_single_token_bytes(t)
            assert len(token_bytes) > 0
            dst.write(struct.pack('<H', len(token_bytes)))
            dst.write(token_bytes)
            tokenizer_bytes_written += len(token_bytes) + 2
        # Sanity check: actual token table size must match the header field.
        assert(tokenizer_bytes_written == tokens_size), (tokenizer_bytes_written, tokens_size)
        write_padding(dst)
        embedding_weight = get_tensor("embedding.weight")
        # Filter out unused tokens
        embedding_weight = embedding_weight[:num_included_tokens, :]
        write_embedding_weight(dst, embedding_weight)
        for n in tqdm(range(num_blocks)):
            write_rmsnorm_gain(dst, get_tensor(f"block.{n}.attn.norm.scale"))
            attn_qkv_weight = get_tensor(f"block.{n}.attn.qkv.weight")
            attn_qkv_bias = get_tensor(f"block.{n}.attn.qkv.bias")
            for qkv in (attn_qkv_weight, attn_qkv_bias):
                # Split the fused QKV tensor, de-interleave the two rotary
                # halves of each head (the view/transpose below), rescale Q/K,
                # and write the result back in place via copy_().
                qk = qkv[:head_dim * (num_q_heads + num_kv_heads), ...].contiguous()
                v = qkv[head_dim * (num_q_heads + num_kv_heads):, ...].contiguous()
                qk = qk.view(num_q_heads + num_kv_heads, 2, head_dim // 2, -1).transpose(1, 2).reshape(num_q_heads + num_kv_heads, head_dim, -1)
                q = qk[:num_q_heads, ...]
                k = qk[num_q_heads:, ...]
                # Factor multiplication by 1/sqrt(64) = 0.125 = 0.5 * 0.25 in SDPA into Q and K projections
                assert head_dim == 64
                q *= 0.5
                k *= 0.25
                v = v.view(num_kv_heads, head_dim, -1)
                qkv.copy_(torch.cat((q, k, v), dim=0).reshape(*qkv.shape))
            write_linear_weight(dst, attn_qkv_weight, attn_qkv_bias)
            write_attn_sink(dst, get_tensor(f"block.{n}.attn.sinks"))
            write_linear_weight(dst, get_tensor(f"block.{n}.attn.out.weight"), get_tensor(f"block.{n}.attn.out.bias"))
            write_rmsnorm_gain(dst, get_tensor(f"block.{n}.mlp.norm.scale"))
            write_linear_weight(dst, get_tensor(f"block.{n}.mlp.gate.weight"), get_tensor(f"block.{n}.mlp.gate.bias"))
        write_rmsnorm_gain(dst, get_tensor("norm.scale"))
        unembedding_weight = get_tensor("unembedding.weight")
        unembedding_weight = unembedding_weight[:num_included_tokens, :]
        write_linear_weight(dst, unembedding_weight)
        # MoE expert weights are appended after all dense tensors.
        for n in tqdm(range(num_blocks)):
            mlp1_blocks = get_tensor(f"block.{n}.mlp.mlp1_weight.blocks")
            mlp1_scales = get_tensor(f"block.{n}.mlp.mlp1_weight.scales")
            # Scales must leave headroom for the UE8_OFFSET bias added below.
            assert mlp1_scales.min().item() < 254 - UE8_OFFSET
            mlp1_bias = get_tensor(f"block.{n}.mlp.mlp1_bias")
            mlp2_blocks = get_tensor(f"block.{n}.mlp.mlp2_weight.blocks")
            mlp2_scales = get_tensor(f"block.{n}.mlp.mlp2_weight.scales")
            assert mlp2_scales.min().item() < 254 - UE8_OFFSET
            mlp2_bias = get_tensor(f"block.{n}.mlp.mlp2_bias")
            # Write MoE weights grouped by expert
            write_padding(dst)
            for e in range(num_experts):
                write_padding(dst, alignment_multiple=16)
                dst.write(mlp1_blocks[e, ...].view(torch.uint8).numpy().tobytes())
                write_padding(dst, alignment_multiple=16)
                dst.write((mlp1_scales + UE8_OFFSET)[e, ...].view(torch.uint8).numpy().tobytes())
                write_padding(dst, alignment_multiple=16)
                dst.write(mlp1_bias[e, ...].view(torch.uint8).numpy().tobytes())
                write_padding(dst, alignment_multiple=16)
                dst.write(mlp2_blocks[e, ...].view(torch.uint8).numpy().tobytes())
                write_padding(dst, alignment_multiple=16)
                dst.write((mlp2_scales + UE8_OFFSET)[e, ...].view(torch.uint8).numpy().tobytes())
                write_padding(dst, alignment_multiple=16)
                dst.write(mlp2_bias[e, ...].view(torch.uint8).numpy().tobytes())
if __name__ == "__main__":
    # CLI entry point: forward argv (minus the program name) to main().
    main(sys.argv[1:])
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/metal/scripts/create-local-model.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/responses_api/api_server.py | import os
import datetime
import uuid
from typing import Callable, Literal, Optional, Union
from fastapi import FastAPI, Request
from fastapi.exception_handlers import request_validation_exception_handler
from fastapi.exceptions import RequestValidationError
from fastapi.responses import StreamingResponse
from openai_harmony import (
Author,
Conversation,
DeveloperContent,
HarmonyEncoding,
Message,
ReasoningEffort,
Role,
StreamableParser,
StreamState,
SystemContent,
ToolDescription,
)
from gpt_oss.tools.python_docker.docker_tool import PythonTool
from gpt_oss.tools.simple_browser import SimpleBrowserTool
from gpt_oss.tools.simple_browser.backend import YouComBackend, ExaBackend
from .events import (
ResponseCodeInterpreterCallCodeDelta,
ResponseCodeInterpreterCallCodeDone,
ResponseCodeInterpreterCallCompleted,
ResponseCodeInterpreterCallInProgress,
ResponseCodeInterpreterCallInterpreting,
ResponseCompletedEvent,
ResponseContentPartAdded,
ResponseContentPartDone,
ResponseCreatedEvent,
ResponseEvent,
ResponseInProgressEvent,
ResponseOutputItemAdded,
ResponseOutputItemDone,
ResponseOutputTextAnnotationAdded,
ResponseOutputTextDelta,
ResponseOutputTextDone,
ResponseReasoningTextDelta,
ResponseReasoningTextDone,
ResponseWebSearchCallCompleted,
ResponseWebSearchCallInProgress,
ResponseWebSearchCallSearching,
)
from .types import (
CodeInterpreterCallItem,
CodeInterpreterOutputImage,
CodeInterpreterOutputLogs,
Error,
FunctionCallItem,
Item,
ReasoningItem,
ReasoningTextContentItem,
ResponseObject,
ResponsesRequest,
TextContentItem,
UrlCitation,
Usage,
WebSearchActionFind,
WebSearchActionOpenPage,
WebSearchActionSearch,
WebSearchCallItem,
)
# Sampling temperature used when a request does not specify one.
DEFAULT_TEMPERATURE = 0.0
def get_reasoning_effort(
    effort: Union[Literal["low", "medium", "high"], ReasoningEffort]
) -> ReasoningEffort:
    """Coerce an effort level into a ReasoningEffort enum member.

    Already-typed ReasoningEffort values pass through unchanged; the
    strings "low"/"medium"/"high" map to the matching member; anything
    else raises ValueError.
    """
    if isinstance(effort, ReasoningEffort):
        return effort
    for name, member in (
        ("low", ReasoningEffort.LOW),
        ("medium", ReasoningEffort.MEDIUM),
        ("high", ReasoningEffort.HIGH),
    ):
        if effort == name:
            return member
    raise ValueError(f"Invalid reasoning effort: {effort}")
def is_not_builtin_tool(
    recipient: str, treat_functions_python_as_builtin: bool = False
) -> bool:
    """Return True when *recipient* targets a user-defined function tool.

    Built-in targets — any "browser.*" recipient, "python", "assistant",
    and optionally "functions.python" — yield False; everything else is
    treated as a user function call.
    """
    if treat_functions_python_as_builtin and recipient == "functions.python":
        return False
    if recipient.startswith("browser."):
        return False
    return recipient not in ("python", "assistant")
def create_api_server(
infer_next_token: Callable[[list[int], float], int], encoding: HarmonyEncoding
) -> FastAPI:
app = FastAPI()
@app.exception_handler(RequestValidationError)
async def log_validation_error(request: Request, exc: RequestValidationError):
try:
body_bytes = await request.body()
print(
"Invalid request body received:"
f" {body_bytes.decode('utf-8', errors='replace')}"
)
except Exception as body_exc:
print(f"Failed to read invalid request body: {body_exc}")
return await request_validation_exception_handler(request, exc)
responses_store: dict[str, tuple[ResponsesRequest, ResponseObject]] = {}
    def generate_response(
        input_tokens: list[int],
        output_tokens: list[int],
        request_body: ResponsesRequest,
        debug_mode: bool = False,
        function_call_ids: Optional[list[tuple[str, str]]] = None,
        response_id: Optional[str] = None,
        previous_response_id: Optional[str] = None,
        browser_tool: Optional[SimpleBrowserTool] = None,
        browser_call_ids: Optional[list[str]] = None,
        python_tool: Optional[PythonTool] = None,
        python_call_ids: Optional[list[str]] = None,
        python_call_outputs: Optional[
            dict[str, list[CodeInterpreterOutputLogs | CodeInterpreterOutputImage]]
        ] = None,
        reasoning_ids: Optional[list[str]] = None,
        message_ids: Optional[list[str]] = None,
        treat_functions_python_as_builtin: bool = False,
    ) -> ResponseObject:
        """Parse sampled completion tokens into a completed ResponseObject.

        Harmony messages recovered from *output_tokens* are mapped, in
        order, to output items: function calls, web-search (browser) calls,
        code-interpreter calls, final assistant messages, and reasoning
        items. Ids pre-allocated by the streaming path (*function_call_ids*,
        *browser_call_ids*, *python_call_ids*, *reasoning_ids*,
        *message_ids*) are reused so streamed events and the stored response
        agree; fresh uuid-based ids are minted when none are supplied.
        When *debug_mode* is set, parse failures become an Error on the
        response and decoded token strings are attached to metadata.
        """
        output = []
        error = None
        if len(output_tokens) > 0:
            if debug_mode:
                # Debug mode tolerates malformed completions: surface the
                # parse failure as an Error instead of raising.
                try:
                    entries = encoding.parse_messages_from_completion_tokens(
                        output_tokens, Role.ASSISTANT
                    )
                except Exception as e:
                    print(f"Error parsing tokens: {e}")
                    error = Error(
                        code="invalid_function_call",
                        message=f"{e}",
                    )
                    entries = []
            else:
                entries = encoding.parse_messages_from_completion_tokens(
                    output_tokens, Role.ASSISTANT
                )
            # Cursors into the pre-allocated id lists (one per item kind).
            fc_index = 0
            browser_tool_index = 0
            python_tool_index = 0
            reasoning_ids_iter = iter(reasoning_ids or [])
            message_ids_iter = iter(message_ids or [])
            for entry in entries:
                entry_dict = entry.to_dict()
                recipient = entry_dict.get("recipient", "")
                if len(recipient) > 0 and is_not_builtin_tool(
                    recipient, treat_functions_python_as_builtin
                ):
                    # User-defined function call; strip the "functions." prefix.
                    call = entry_dict["content"][0]
                    arguments = call["text"]
                    name = recipient
                    if name.startswith("functions."):
                        name = name[len("functions.") :]
                    if function_call_ids and fc_index < len(function_call_ids):
                        fc_id, call_id = function_call_ids[fc_index]
                    else:
                        fc_id, call_id = (
                            f"fc_{uuid.uuid4().hex}",
                            f"call_{uuid.uuid4().hex}",
                        )
                    fc_index += 1
                    output.append(
                        FunctionCallItem(
                            type="function_call",
                            name=name,
                            arguments=arguments,
                            id=fc_id,
                            call_id=call_id,
                        )
                    )
                elif (
                    len(recipient) > 0
                    and recipient.startswith("browser.")
                    and browser_tool is not None
                ):
                    # Mirror event-based creation of WebSearchCallItems when the browser tool is invoked
                    name = recipient
                    call = entry_dict["content"][0]
                    arguments = call["text"]
                    function_name = name[len("browser.") :]
                    # Reconstruct a Message for argument parsing
                    tool_msg = (
                        Message.from_role_and_content(Role.ASSISTANT, arguments)
                        .with_recipient(name)
                        .with_channel("analysis")
                    )
                    action = None
                    try:
                        parsed_args = browser_tool.process_arguments(tool_msg)
                        if function_name == "search":
                            action = WebSearchActionSearch(
                                type="search",
                                query=parsed_args["query"],
                            )
                        elif function_name == "open":
                            action = WebSearchActionOpenPage(
                                type="open_page",
                                url=parsed_args["url"],
                            )
                        elif function_name == "find":
                            action = WebSearchActionFind(
                                type="find",
                                pattern=parsed_args["pattern"],
                                url=parsed_args["url"],
                            )
                    except Exception as e:
                        # Unparseable browser arguments: drop the call item.
                        print(f"Error processing browser tool arguments: {e}")
                        action = None
                    if action is not None:
                        if browser_call_ids and browser_tool_index < len(
                            browser_call_ids
                        ):
                            web_search_call_id = browser_call_ids[browser_tool_index]
                        else:
                            web_search_call_id = f"ws_{uuid.uuid4().hex}"
                        browser_tool_index += 1
                        output.append(
                            WebSearchCallItem(
                                type="web_search_call",
                                id=web_search_call_id,
                                action=action,
                            )
                        )
                elif (
                    len(recipient) > 0
                    and (
                        recipient.startswith("python")
                        or (
                            treat_functions_python_as_builtin
                            and recipient == "functions.python"
                        )
                    )
                    and python_tool is not None
                ):
                    # Code-interpreter call; outputs captured during streaming
                    # are attached by call id when available.
                    if python_call_ids and python_tool_index < len(python_call_ids):
                        code_call_id = python_call_ids[python_tool_index]
                    else:
                        code_call_id = f"ci_{uuid.uuid4().hex}"
                    python_tool_index += 1
                    code_snippet = None
                    if entry_dict.get("content"):
                        code_snippet = entry_dict["content"][0].get("text")
                    outputs = (
                        (python_call_outputs or {}).get(code_call_id)
                        if python_call_outputs
                        else None
                    )
                    output.append(
                        CodeInterpreterCallItem(
                            type="code_interpreter_call",
                            id=code_call_id,
                            status="completed",
                            code=code_snippet,
                            outputs=outputs,
                        )
                    )
                elif entry_dict["channel"] == "final":
                    # Final assistant message; normalize browser citations
                    # into UrlCitation annotations when the tool is active.
                    content = []
                    for content_entry in entry_dict["content"]:
                        if browser_tool:
                            text_content, annotation_entries, _has_partial_citations = (
                                browser_tool.normalize_citations(content_entry["text"])
                            )
                            annotations = [UrlCitation(**a) for a in annotation_entries]
                        else:
                            text_content = content_entry["text"]
                            annotations = []
                        content.append(
                            TextContentItem(
                                type="output_text",
                                text=text_content,
                                annotations=annotations,
                            )
                        )
                    message_id = next(message_ids_iter, None)
                    output.append(
                        Item(
                            id=message_id,
                            type="message",
                            role="assistant",
                            content=content,
                            status="completed",
                        )
                    )
                elif entry_dict["channel"] == "analysis":
                    # Plain analysis (no recipient, assistant-authored)
                    # becomes a reasoning item; tool-addressed analysis was
                    # handled by the branches above.
                    if entry_dict.get("recipient"):
                        continue
                    author_dict = entry_dict.get("author") or {}
                    if author_dict.get("role") and author_dict.get("role") != "assistant":
                        continue
                    summary = []
                    content = [
                        ReasoningTextContentItem(
                            type="reasoning_text",
                            text=entry["text"],
                        )
                        for entry in entry_dict["content"]
                    ]
                    reasoning_id = next(reasoning_ids_iter, None)
                    if reasoning_id is None:
                        reasoning_id = f"rs_{uuid.uuid4().hex}"
                    output.append(
                        ReasoningItem(
                            id=reasoning_id,
                            type="reasoning",
                            summary=summary,
                            content=content,
                        )
                    )
        else:
            output = []
        usage = (
            Usage(
                input_tokens=len(input_tokens),
                output_tokens=len(output_tokens),
                total_tokens=len(input_tokens) + len(output_tokens),
            )
            if len(output_tokens) > 0
            else None
        )
        # Best-effort decodes for debug metadata; fall back to raw token
        # lists when the byte sequence is not valid UTF-8.
        try:
            debug_str = encoding.decode_utf8(input_tokens + output_tokens)
        except Exception:
            debug_str = input_tokens + output_tokens
        try:
            debug_input_str = encoding.decode_utf8(input_tokens)
        except Exception:
            debug_input_str = input_tokens
        try:
            debug_output_str = encoding.decode_utf8(output_tokens)
        except Exception:
            debug_output_str = output_tokens
        metadata = (
            {
                "__debug": debug_str,
                "__debug_input": debug_input_str,
                "__debug_output": debug_output_str,
            }
            if debug_mode
            else {}
        )
        return ResponseObject(
            created_at=int(datetime.datetime.now().timestamp()),
            status="completed",
            output=output,
            text={"format": {"type": "text"}},
            usage=usage,
            max_output_tokens=request_body.max_output_tokens,
            error=error,
            metadata=metadata,
            id=response_id,
            previous_response_id=previous_response_id,
        )
    class StreamResponsesEvents:
        """Async producer of Responses-API streaming events.

        run() samples tokens via infer_next_token, feeds them through a
        harmony StreamableParser, executes browser / python tool calls
        inline, and yields ResponseEvent objects (or SSE-formatted strings
        when constructed with as_sse=True).
        """
        # Recipients handled by the browser tool even when the model
        # addresses them through the "functions." namespace.
        BROWSER_RESERVED_FUNCTIONS = {"browser.search", "browser.open", "browser.find"}
        initial_tokens: list[int]  # rendered prompt prefix
        tokens: list[int]  # prompt + sampled + injected tool tokens
        output_tokens: list[int]  # sampled completion tokens only
        output_text: str  # decoded completion, for debugging
        request_body: ResponsesRequest
        request: Request
        sequence_number: int  # monotonically increasing event counter
        def __init__(
            self,
            initial_tokens,
            request_body: ResponsesRequest,
            as_sse: bool = False,
            request: Optional[Request] = None,
            response_id: Optional[str] = None,
            store_callback: Optional[
                Callable[[str, ResponsesRequest, ResponseObject], None]
            ] = None,
            browser_tool: Optional[SimpleBrowserTool] = None,
            python_tool: Optional[PythonTool] = None,
            functions_python_as_builtin: bool = False,
        ):
            """Capture the rendered prompt and per-request configuration.

            Sampling state, tool bookkeeping, and the pre-allocated item id
            lists all start empty and are filled in by run().
            """
            self.initial_tokens = initial_tokens
            # Working token buffer: prompt plus everything appended later.
            self.tokens = initial_tokens.copy()
            self.output_tokens = []
            self.output_text = ""
            self.request_body = request_body
            self.parser = StreamableParser(encoding, role=Role.ASSISTANT)
            self.as_sse = as_sse
            self.debug_mode = request_body.metadata.get(
                "__debug", False
            )  # we use this for demo purposes
            # Set temperature for this stream, fallback to DEFAULT_TEMPERATURE if not set
            self.temperature = (
                request_body.temperature
                if request_body.temperature is not None
                else DEFAULT_TEMPERATURE
            )
            self.request = request
            self.sequence_number = 0
            self.function_call_ids: list[tuple[str, str]] = []
            self.response_id = response_id
            self.store_callback = store_callback
            self.new_request = True
            self.browser_tool = browser_tool
            self.use_browser_tool = browser_tool is not None
            self.browser_call_ids: list[str] = []
            self.python_tool = python_tool
            self.use_code_interpreter = python_tool is not None
            self.python_call_ids: list[str] = []
            # Code-interpreter outputs captured during streaming, by call id.
            self.python_call_outputs: dict[
                str, list[CodeInterpreterOutputLogs | CodeInterpreterOutputImage]
            ] = {}
            self.reasoning_item_ids: list[str] = []
            self.current_reasoning_item_id: Optional[str] = None
            self.message_item_ids: list[str] = []
            self.current_message_item_id: Optional[str] = None
            self.functions_python_as_builtin = functions_python_as_builtin
            # Names of user-declared function tools; consulted so a user
            # function colliding with a reserved browser name is not hijacked.
            self.user_defined_function_names = {
                name
                for tool in (request_body.tools or [])
                for name in [getattr(tool, "name", None)]
                if getattr(tool, "type", None) == "function" and name
            }
def _resolve_browser_recipient(
self, recipient: Optional[str]
) -> tuple[Optional[str], bool]:
if not self.use_browser_tool or not recipient:
return (None, False)
if recipient.startswith("browser."):
return (recipient, False)
if recipient.startswith("functions."):
potential = recipient[len("functions.") :]
if (
potential in self.BROWSER_RESERVED_FUNCTIONS
and potential not in self.user_defined_function_names
):
return (potential, True)
return (None, False)
def _ensure_message_item_id(self) -> str:
if self.current_message_item_id is None:
message_id = f"item_{uuid.uuid4().hex}"
self.current_message_item_id = message_id
self.message_item_ids.append(message_id)
return self.current_message_item_id
def _ensure_reasoning_item_id(self) -> str:
if self.current_reasoning_item_id is None:
reasoning_id = f"rs_{uuid.uuid4().hex}"
self.current_reasoning_item_id = reasoning_id
self.reasoning_item_ids.append(reasoning_id)
return self.current_reasoning_item_id
def _send_event(self, event: ResponseEvent):
event.sequence_number = self.sequence_number
self.sequence_number += 1
if self.as_sse:
return f"event: {event.type}\ndata: {event.model_dump_json(indent=None)}\n\n"
else:
return event
async def run(self):
browser_tool = self.browser_tool
self.new_request = True
initial_response = generate_response(
self.initial_tokens,
self.output_tokens,
self.request_body,
function_call_ids=self.function_call_ids,
response_id=self.response_id,
previous_response_id=self.request_body.previous_response_id,
browser_tool=self.browser_tool,
browser_call_ids=self.browser_call_ids,
python_tool=self.python_tool,
python_call_ids=self.python_call_ids,
python_call_outputs=getattr(self, "python_call_outputs", None),
reasoning_ids=self.reasoning_item_ids,
message_ids=self.message_item_ids,
treat_functions_python_as_builtin=self.functions_python_as_builtin,
)
initial_response.status = "in_progress"
yield self._send_event(
ResponseCreatedEvent(
type="response.created",
response=initial_response,
)
)
yield self._send_event(
ResponseInProgressEvent(
type="response.in_progress",
response=initial_response,
)
)
current_content_index = (
0 # for this implementation we will always have one content item only
)
current_output_index = -1
sent_output_item_added = False
# we use this if the model outputs a citation to buffer until completed
output_delta_buffer = ""
# we use this to track the current output text content for things like providing the right indices in citations
current_output_text_content = ""
current_annotations = []
while True:
# Check for client disconnect
if self.request is not None and await self.request.is_disconnected():
print("Client disconnected, stopping token generation.")
break
next_tok = infer_next_token(
self.tokens,
temperature=self.temperature,
new_request=self.new_request,
)
self.new_request = False
self.tokens.append(next_tok)
try:
self.parser.process(next_tok)
except Exception:
pass
if self.parser.state == StreamState.EXPECT_START:
current_output_index += 1
sent_output_item_added = False
if len(self.parser.messages) > 0:
previous_item = self.parser.messages[-1]
if previous_item.recipient is not None:
recipient = previous_item.recipient
browser_recipient, _ = self._resolve_browser_recipient(
recipient
)
if (
browser_recipient is None
and not (
recipient == "python"
or (
self.functions_python_as_builtin
and recipient == "functions.python"
)
)
):
fc_id = f"fc_{uuid.uuid4().hex}"
call_id = f"call_{uuid.uuid4().hex}"
self.function_call_ids.append((fc_id, call_id))
yield self._send_event(
ResponseOutputItemDone(
type="response.output_item.done",
output_index=current_output_index,
item=FunctionCallItem(
type="function_call",
name=(
previous_item.recipient[
len("functions.") :
]
if previous_item.recipient.startswith(
"functions."
)
else previous_item.recipient
),
arguments=previous_item.content[0].text,
id=fc_id,
call_id=call_id,
),
)
)
if (
previous_item.channel == "analysis"
and previous_item.recipient is None
):
reasoning_id = (
self.current_reasoning_item_id
if self.current_reasoning_item_id is not None
else self._ensure_reasoning_item_id()
)
reasoning_text = previous_item.content[0].text
yield self._send_event(
ResponseReasoningTextDone(
type="response.reasoning_text.done",
output_index=current_output_index,
content_index=current_content_index,
item_id=reasoning_id,
text=reasoning_text,
)
)
yield self._send_event(
ResponseContentPartDone(
type="response.content_part.done",
output_index=current_output_index,
content_index=current_content_index,
item_id=reasoning_id,
part=ReasoningTextContentItem(
type="reasoning_text",
text=reasoning_text,
),
)
)
yield self._send_event(
ResponseOutputItemDone(
type="response.output_item.done",
output_index=current_output_index,
item=ReasoningItem(
id=reasoning_id,
type="reasoning",
summary=[],
content=[
ReasoningTextContentItem(
type="reasoning_text",
text=reasoning_text,
)
],
),
)
)
self.current_reasoning_item_id = None
if previous_item.channel == "final":
annotations = [
UrlCitation(**a) for a in current_annotations
]
if browser_tool:
(
normalized_text,
_annotations,
_has_partial_citations,
) = browser_tool.normalize_citations(
previous_item.content[0].text
)
else:
normalized_text = previous_item.content[0].text
annotations = []
text_content = TextContentItem(
type="output_text",
text=normalized_text,
annotations=annotations,
)
message_id = (
self.current_message_item_id
if self.current_message_item_id is not None
else self._ensure_message_item_id()
)
yield self._send_event(
ResponseOutputTextDone(
type="response.output_text.done",
output_index=current_output_index,
content_index=current_content_index,
item_id=message_id,
text=normalized_text,
)
)
yield self._send_event(
ResponseContentPartDone(
type="response.content_part.done",
output_index=current_output_index,
content_index=current_content_index,
item_id=message_id,
part=text_content,
)
)
yield self._send_event(
ResponseOutputItemDone(
type="response.output_item.done",
output_index=current_output_index,
item=Item(
id=message_id,
type="message",
role="assistant",
content=[text_content],
),
)
)
current_annotations = []
current_output_text_content = ""
self.current_message_item_id = None
if (
self.parser.last_content_delta
and self.parser.current_channel == "final"
and self.parser.current_recipient is None
):
if not sent_output_item_added:
sent_output_item_added = True
message_id = self._ensure_message_item_id()
yield self._send_event(
ResponseOutputItemAdded(
type="response.output_item.added",
output_index=current_output_index,
item=Item(
id=message_id,
type="message",
role="assistant",
content=[],
),
)
)
yield self._send_event(
ResponseContentPartAdded(
type="response.content_part.added",
output_index=current_output_index,
content_index=current_content_index,
item_id=message_id,
part=TextContentItem(type="output_text", text=""),
)
)
output_delta_buffer += self.parser.last_content_delta
should_send_output_text_delta = True
if browser_tool:
# we normalize on the full current text to get the right indices in citations
updated_output_text, annotations, has_partial_citations = (
browser_tool.normalize_citations(
current_output_text_content + output_delta_buffer
)
)
# remove the current text to get back the delta but now normalized
output_delta_buffer = updated_output_text[
len(current_output_text_content) :
]
# Filter annotations to only include those whose start_index is not already present in current_annotations
# this is to avoid sending duplicate annotations as multiple annotations can't be in the same place
existing_start_indices = {
a["start_index"] for a in current_annotations
}
new_annotations = [
a
for a in annotations
if a["start_index"] not in existing_start_indices
]
for a in new_annotations:
current_annotations.append(a)
citation = UrlCitation(**a)
message_id = self._ensure_message_item_id()
yield self._send_event(
ResponseOutputTextAnnotationAdded(
type="response.output_text.annotation.added",
output_index=current_output_index,
content_index=current_content_index,
item_id=message_id,
annotation_index=len(current_annotations),
annotation=citation,
)
)
if has_partial_citations:
should_send_output_text_delta = False
if should_send_output_text_delta:
message_id = self._ensure_message_item_id()
yield self._send_event(
ResponseOutputTextDelta(
type="response.output_text.delta",
output_index=current_output_index,
content_index=current_content_index,
item_id=message_id,
delta=output_delta_buffer,
)
)
current_output_text_content += output_delta_buffer
output_delta_buffer = ""
if (
self.parser.last_content_delta
and self.parser.current_channel == "analysis"
and self.parser.current_recipient is None
):
if not sent_output_item_added:
sent_output_item_added = True
reasoning_id = self._ensure_reasoning_item_id()
yield self._send_event(
ResponseOutputItemAdded(
type="response.output_item.added",
output_index=current_output_index,
item=ReasoningItem(
id=reasoning_id,
type="reasoning",
summary=[],
content=[],
),
)
)
yield self._send_event(
ResponseContentPartAdded(
type="response.content_part.added",
output_index=current_output_index,
content_index=current_content_index,
item_id=reasoning_id,
part=ReasoningTextContentItem(
type="reasoning_text", text=""
),
)
)
reasoning_id = self._ensure_reasoning_item_id()
yield self._send_event(
ResponseReasoningTextDelta(
type="response.reasoning_text.delta",
output_index=current_output_index,
content_index=current_content_index,
item_id=reasoning_id,
delta=self.parser.last_content_delta,
)
)
try:
# purely for debugging purposes
output_token_text = encoding.decode_utf8([next_tok])
self.output_text += output_token_text
print(output_token_text, end="", flush=True)
except RuntimeError:
pass
if next_tok in encoding.stop_tokens_for_assistant_actions():
if len(self.parser.messages) > 0:
last_message = self.parser.messages[-1]
browser_recipient, is_browser_fallback = (
self._resolve_browser_recipient(last_message.recipient)
)
if browser_recipient is not None and browser_tool is not None:
message_for_browser = (
last_message
if not is_browser_fallback
else last_message.with_recipient(browser_recipient)
)
function_name = browser_recipient[len("browser.") :]
action = None
parsed_args = browser_tool.process_arguments(
message_for_browser
)
if function_name == "search":
action = WebSearchActionSearch(
type="search",
query=parsed_args["query"],
)
elif function_name == "open":
action = WebSearchActionOpenPage(
type="open_page",
url=(
parsed_args["url"]
if "url" in parsed_args
else None
),
)
elif function_name == "find":
action = WebSearchActionFind(
type="find",
pattern=parsed_args["pattern"],
url=(
parsed_args["url"]
if "url" in parsed_args
else None
),
)
if action is not None:
web_search_call_id = f"ws_{uuid.uuid4().hex}"
self.browser_call_ids.append(web_search_call_id)
yield self._send_event(
ResponseOutputItemAdded(
type="response.output_item.added",
output_index=current_output_index,
item=WebSearchCallItem(
type="web_search_call",
id=web_search_call_id,
action=action,
),
)
)
yield self._send_event(
ResponseWebSearchCallInProgress(
type="response.web_search_call.in_progress",
output_index=current_output_index,
item_id=web_search_call_id,
)
)
async def run_tool():
results = []
async for msg in browser_tool.process(
message_for_browser
):
results.append(msg)
return results
yield self._send_event(
ResponseWebSearchCallSearching(
type="response.web_search_call.searching",
output_index=current_output_index,
item_id=web_search_call_id,
)
)
result = await run_tool()
new_tokens = encoding.render_conversation_for_completion(
Conversation.from_messages(result), Role.ASSISTANT
)
print(encoding.decode_utf8(new_tokens))
self.output_tokens.append(next_tok)
self.tokens.append(
encoding.encode("<|end|>", allowed_special="all")[0]
)
for token in new_tokens:
self.parser.process(token)
self.output_tokens.append(token)
self.tokens.append(token)
yield self._send_event(
ResponseWebSearchCallCompleted(
type="response.web_search_call.completed",
output_index=current_output_index,
item_id=web_search_call_id,
)
)
yield self._send_event(
ResponseOutputItemDone(
type="response.output_item.done",
output_index=current_output_index,
item=WebSearchCallItem(
type="web_search_call",
id=web_search_call_id,
action=action,
),
)
)
current_output_index += 1
self.new_request = True
continue
elif (
self.use_code_interpreter
and last_message.recipient is not None
and (
last_message.recipient.startswith("python")
or (
self.functions_python_as_builtin
and last_message.recipient == "functions.python"
)
)
):
code_call_id = f"ci_{uuid.uuid4().hex}"
code_snippet = None
if (
last_message.content
and len(last_message.content) > 0
and getattr(last_message.content[0], "text", None)
):
text_value = last_message.content[0].text or ""
code_snippet = text_value if text_value.strip() else None
self.python_call_ids.append(code_call_id)
yield self._send_event(
ResponseOutputItemAdded(
type="response.output_item.added",
output_index=current_output_index,
item=CodeInterpreterCallItem(
type="code_interpreter_call",
id=code_call_id,
status="in_progress",
code=code_snippet,
),
)
)
yield self._send_event(
ResponseCodeInterpreterCallInProgress(
type="response.code_interpreter_call.in_progress",
output_index=current_output_index,
item_id=code_call_id,
)
)
if code_snippet:
yield self._send_event(
ResponseCodeInterpreterCallCodeDelta(
type="response.code_interpreter_call_code.delta",
output_index=current_output_index,
item_id=code_call_id,
delta=code_snippet,
)
)
yield self._send_event(
ResponseCodeInterpreterCallCodeDone(
type="response.code_interpreter_call_code.done",
output_index=current_output_index,
item_id=code_call_id,
code=code_snippet,
)
)
yield self._send_event(
ResponseCodeInterpreterCallInterpreting(
type="response.code_interpreter_call.interpreting",
output_index=current_output_index,
item_id=code_call_id,
)
)
async def run_python_tool():
results = []
async for msg in self.python_tool.process(last_message):
results.append(msg)
return results
result = await run_python_tool()
print(result)
code_outputs: list[
CodeInterpreterOutputLogs | CodeInterpreterOutputImage
] = []
for message in result:
for content in getattr(message, "content", []):
text_value = getattr(content, "text", None)
if text_value:
code_outputs.append(
CodeInterpreterOutputLogs(
type="logs",
logs=text_value,
)
)
self.python_call_outputs[code_call_id] = code_outputs
new_tokens = encoding.render_conversation_for_completion(
Conversation.from_messages(result), Role.ASSISTANT
)
print(encoding.decode_utf8(new_tokens))
self.output_tokens.append(next_tok)
self.tokens.append(
encoding.encode("<|end|>", allowed_special="all")[0]
)
for token in new_tokens:
self.parser.process(token)
self.output_tokens.append(token)
self.tokens.append(token)
yield self._send_event(
ResponseCodeInterpreterCallCompleted(
type="response.code_interpreter_call.completed",
output_index=current_output_index,
item_id=code_call_id,
)
)
yield self._send_event(
ResponseOutputItemDone(
type="response.output_item.done",
output_index=current_output_index,
item=CodeInterpreterCallItem(
type="code_interpreter_call",
id=code_call_id,
status="completed",
code=code_snippet,
outputs=code_outputs or None,
),
)
)
current_output_index += 1
self.new_request = True
continue
else:
break
else:
raise ValueError("No messages to process")
if len(self.output_tokens) >= self.request_body.max_output_tokens:
break
# Adding in the end if we know we are not done
self.output_tokens.append(next_tok)
if self.request is None or not await self.request.is_disconnected():
response = generate_response(
self.initial_tokens,
self.output_tokens,
self.request_body,
debug_mode=self.debug_mode,
function_call_ids=self.function_call_ids,
response_id=self.response_id,
previous_response_id=self.request_body.previous_response_id,
browser_tool=self.browser_tool,
browser_call_ids=self.browser_call_ids,
python_tool=self.python_tool,
python_call_ids=self.python_call_ids,
python_call_outputs=self.python_call_outputs,
reasoning_ids=self.reasoning_item_ids,
message_ids=self.message_item_ids,
treat_functions_python_as_builtin=self.functions_python_as_builtin,
)
if self.store_callback and self.request_body.store:
self.store_callback(self.response_id, self.request_body, response)
yield self._send_event(
ResponseCompletedEvent(
type="response.completed",
response=response,
)
)
    @app.post("/v1/responses", response_model=ResponseObject)
    async def generate(body: ResponsesRequest, request: Request):
        """Handle POST /v1/responses: build a Harmony conversation from the
        request, run the token stream, and return (or stream) the response.

        Steps:
          1. Detect which built-in tools (browser, code interpreter) are requested.
          2. Merge in the previous response when `previous_response_id` is set.
          3. Assemble system/developer/user messages and render them to tokens.
          4. Drive StreamResponsesEvents, either as SSE or collected to the end.
        """
        print("request received")
        print(body.reasoning)
        # A browser tool is requested if any tool entry is a browser/web-search type.
        use_browser_tool = any(
            getattr(tool, "type", None) in ("browser_search", "web_search")
            for tool in (body.tools or [])
        )
        use_code_interpreter = any(
            getattr(tool, "type", None) == "code_interpreter"
            for tool in (body.tools or [])
        )
        if use_browser_tool:
            # Backend for web search is chosen via env var; defaults to Exa.
            tool_backend = os.getenv("BROWSER_BACKEND", "exa")
            if tool_backend == "youcom":
                backend = YouComBackend(source="web")
            elif tool_backend == "exa":
                backend = ExaBackend(source="web")
            else:
                raise ValueError(f"Invalid tool backend: {tool_backend}")
            browser_tool = SimpleBrowserTool(backend=backend)
        else:
            browser_tool = None
        if use_code_interpreter:
            python_tool = PythonTool()
        else:
            python_tool = None
        # If the caller defines their own "python" function tool, the built-in
        # interpreter must not hijack `functions.python` calls.
        python_function_name_conflict = any(
            getattr(tool, "type", None) == "function"
            and getattr(tool, "name", None) == "python"
            for tool in (body.tools or [])
        )
        functions_python_as_builtin = use_code_interpreter and not (
            python_function_name_conflict
        )
        if body.previous_response_id:
            # Chain onto a stored earlier response: prepend its input and output.
            prev = responses_store.get(body.previous_response_id)
            if prev:
                prev_req, prev_resp = prev
                def _ensure_list(inp):
                    # Normalize a raw string input into a one-item user message list.
                    if isinstance(inp, str):
                        return [
                            Item(
                                type="message",
                                role="user",
                                content=[TextContentItem(type="input_text", text=inp)],
                            )
                        ]
                    return list(inp)
                merged_input = _ensure_list(prev_req.input) + list(prev_resp.output)
                merged_input.extend(_ensure_list(body.input))
                if body.instructions is None:
                    body.instructions = prev_req.instructions
                body.input = merged_input
        system_message_content = SystemContent.new().with_conversation_start_date(
            datetime.datetime.now().strftime("%Y-%m-%d")
        )
        if body.reasoning is not None:
            try:
                reasoning_effort = get_reasoning_effort(body.reasoning.effort)
            except ValueError as e:
                from fastapi import HTTPException
                print(e)
                # Invalid reasoning effort -> 422 Unprocessable Entity.
                raise HTTPException(status_code=422, detail=str(e))
            system_message_content = system_message_content.with_reasoning_effort(
                reasoning_effort
            )
        if use_browser_tool:
            system_message_content = system_message_content.with_tools(
                browser_tool.tool_config
            )
        if use_code_interpreter:
            system_message_content = system_message_content.with_tools(
                python_tool.tool_config
            )
        system_message = Message.from_role_and_content(
            Role.SYSTEM, system_message_content
        )
        messages = [system_message]
        if body.instructions or body.tools:
            developer_message_content = DeveloperContent.new().with_instructions(
                body.instructions
            )
            # NOTE(review): this iterates body.tools even when only
            # `instructions` is set; body.tools defaults to [] so this is safe
            # unless a caller explicitly sends tools=null — verify upstream.
            tools = []
            for tool in body.tools:
                if tool.type == "function":
                    tools.append(
                        ToolDescription.new(
                            tool.name,
                            tool.description,
                            tool.parameters,
                        )
                    )
            if tools:
                developer_message_content = (
                    developer_message_content.with_function_tools(tools)
                )
            developer_message = Message.from_role_and_content(
                Role.DEVELOPER, developer_message_content
            )
            messages.append(developer_message)
        if isinstance(body.input, str):
            user_message = Message.from_role_and_content(Role.USER, body.input)
            messages.append(user_message)
        else:
            is_last_message_function_call_output = (
                len(body.input) > 0 and body.input[-1].type == "function_call_output"
            )
            function_call_map = {}
            # Find the index of the last assistant message
            last_assistant_idx = -1
            for idx, item in enumerate(body.input):
                if item.type == "message" and item.role == Role.ASSISTANT:
                    last_assistant_idx = idx
            for idx, item in enumerate(body.input):
                if item.type == "message":
                    # TODO: add system prompt handling
                    if isinstance(item.content, str):
                        messages.append(
                            Message.from_role_and_content(item.role, item.content)
                        )
                    else:
                        for content_item in item.content:
                            messages.append(
                                Message.from_role_and_content(
                                    item.role, content_item.text
                                )
                            )
                    # add final channel to the last assistant message if it's from the assistant
                    if item.role == Role.ASSISTANT:
                        messages[-1] = messages[-1].with_channel("final")
                elif item.type == "reasoning":
                    # Only include reasoning if it is after the last assistant message and we are handling a function call at the moment
                    if (
                        idx > last_assistant_idx
                        and is_last_message_function_call_output
                    ):
                        for content_item in item.content:
                            messages.append(
                                Message.from_role_and_content(
                                    Role.ASSISTANT, content_item.text
                                ).with_channel("analysis")
                            )
                elif item.type == "function_call":
                    function_call_map[item.call_id] = item
                    messages.append(
                        Message.from_role_and_content(Role.ASSISTANT, item.arguments)
                        .with_recipient(f"functions.{item.name}")
                        .with_channel("commentary")
                    )
                elif item.type == "function_call_output":
                    # The matching function_call must appear earlier in input.
                    function_call = function_call_map.get(item.call_id, None)
                    if not function_call:
                        raise ValueError(f"Function call {item.call_id} not found")
                    messages.append(
                        Message.from_author_and_content(
                            Author.new(Role.TOOL, f"functions.{function_call.name}"),
                            item.output,
                        )
                        .with_recipient("assistant")
                        .with_channel("commentary")
                    )
        conversation = Conversation.from_messages(messages)
        initial_tokens = encoding.render_conversation_for_completion(
            conversation, Role.ASSISTANT
        )
        print(encoding.decode_utf8(initial_tokens))
        response_id = f"resp_{uuid.uuid4().hex}"
        def store_callback(rid: str, req: ResponsesRequest, resp: ResponseObject):
            # Persist the finished response so previous_response_id can chain onto it.
            responses_store[rid] = (req, resp)
        event_stream = StreamResponsesEvents(
            initial_tokens,
            body,
            as_sse=body.stream,
            request=request,
            response_id=response_id,
            store_callback=store_callback,
            browser_tool=browser_tool,
            python_tool=python_tool,
            functions_python_as_builtin=functions_python_as_builtin,
        )
        if body.stream:
            return StreamingResponse(event_stream.run(), media_type="text/event-stream")
        else:
            # Non-streaming: drain the generator and return the final response.
            last_event = None
            async for event in event_stream.run():
                last_event = event
            return last_event.response
return app
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/api_server.py",
"license": "Apache License 2.0",
"lines": 1249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/responses_api/events.py | # torchrun --nproc-per-node=4 responses_api.py
from typing import Literal, Optional, Union
from pydantic import BaseModel
from .types import (
CodeInterpreterCallItem,
CodeInterpreterOutputImage,
CodeInterpreterOutputLogs,
FunctionCallItem,
Item,
ReasoningItem,
ReasoningTextContentItem,
ResponseObject,
TextContentItem,
UrlCitation,
WebSearchCallItem,
)
class ResponseEvent(BaseModel):
    """Base class for all SSE events emitted by the Responses API."""

    # Position of this event within the event stream (defaults to 1).
    sequence_number: Optional[int] = 1


class ResponseCreatedEvent(ResponseEvent):
    """Emitted once when a response object is first created."""

    type: Literal["response.created"]
    response: ResponseObject


class ResponseCompletedEvent(ResponseEvent):
    """Emitted once when the response has fully completed."""

    type: Literal["response.completed"]
    response: ResponseObject


class ResponseOutputTextDelta(ResponseEvent):
    """Streams an incremental chunk of output text."""

    type: Literal["response.output_text.delta"] = "response.output_text.delta"
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    delta: str = ""
    logprobs: list = []


class ResponseReasoningSummaryTextDelta(ResponseEvent):
    """Streams an incremental chunk of the reasoning summary text."""

    type: Literal["response.reasoning_summary_text.delta"] = (
        "response.reasoning_summary_text.delta"
    )
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    delta: str = ""


class ResponseReasoningTextDelta(ResponseEvent):
    """Streams an incremental chunk of raw reasoning text."""

    type: Literal["response.reasoning_text.delta"] = "response.reasoning_text.delta"
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    delta: str = ""


class ResponseReasoningTextDone(ResponseEvent):
    """Marks a reasoning text part as complete, carrying the full text."""

    type: Literal["response.reasoning_text.done"] = "response.reasoning_text.done"
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    text: str = ""


class ResponseOutputItemAdded(ResponseEvent):
    """Announces that a new output item was added to the response."""

    type: Literal["response.output_item.added"] = "response.output_item.added"
    output_index: int = 0
    item: Union[
        Item,
        ReasoningItem,
        FunctionCallItem,
        WebSearchCallItem,
        CodeInterpreterCallItem,
    ]


class ResponseOutputItemDone(ResponseEvent):
    """Marks an output item as finished; its content will not change."""

    type: Literal["response.output_item.done"] = "response.output_item.done"
    output_index: int = 0
    item: Union[
        Item,
        ReasoningItem,
        FunctionCallItem,
        WebSearchCallItem,
        CodeInterpreterCallItem,
    ]


class ResponseInProgressEvent(ResponseEvent):
    """Status event emitted while the response is still being generated."""

    type: Literal["response.in_progress"]
    response: ResponseObject
class ResponseContentPartAdded(ResponseEvent):
    """Announces a new content part within an output item."""

    type: Literal["response.content_part.added"] = "response.content_part.added"
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    part: Union[TextContentItem, ReasoningTextContentItem]


class ResponseOutputTextDone(ResponseEvent):
    """Marks an output-text part as complete, carrying the full text."""

    type: Literal["response.output_text.done"] = "response.output_text.done"
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    text: str = ""
    logprobs: list = []


class ResponseContentPartDone(ResponseEvent):
    """Marks a content part as complete."""

    type: Literal["response.content_part.done"] = "response.content_part.done"
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    part: Union[TextContentItem, ReasoningTextContentItem]


class ResponseOutputTextAnnotationAdded(ResponseEvent):
    """Attaches a URL-citation annotation to already-streamed output text."""

    type: Literal["response.output_text.annotation.added"] = (
        "response.output_text.annotation.added"
    )
    item_id: str = "item_1234"
    output_index: int = 0
    content_index: int = 0
    annotation_index: int = 0
    annotation: UrlCitation


class ResponseWebSearchCallInProgress(ResponseEvent):
    """Web-search tool call has started."""

    type: Literal["response.web_search_call.in_progress"] = (
        "response.web_search_call.in_progress"
    )
    output_index: int = 0
    item_id: str = "item_1234"


class ResponseWebSearchCallSearching(ResponseEvent):
    """Web-search tool call is actively searching."""

    type: Literal["response.web_search_call.searching"] = (
        "response.web_search_call.searching"
    )
    output_index: int = 0
    item_id: str = "item_1234"


class ResponseWebSearchCallCompleted(ResponseEvent):
    """Web-search tool call has finished."""

    type: Literal["response.web_search_call.completed"] = (
        "response.web_search_call.completed"
    )
    output_index: int = 0
    item_id: str = "item_1234"


class ResponseCodeInterpreterCallInProgress(ResponseEvent):
    """Code-interpreter call has started."""

    type: Literal["response.code_interpreter_call.in_progress"] = (
        "response.code_interpreter_call.in_progress"
    )
    output_index: int = 0
    item_id: str = "item_1234"


class ResponseCodeInterpreterCallInterpreting(ResponseEvent):
    """Code-interpreter call is executing the submitted code."""

    type: Literal["response.code_interpreter_call.interpreting"] = (
        "response.code_interpreter_call.interpreting"
    )
    output_index: int = 0
    item_id: str = "item_1234"


class ResponseCodeInterpreterCallCodeDelta(ResponseEvent):
    """Streams an incremental chunk of the code being submitted."""

    type: Literal["response.code_interpreter_call_code.delta"] = (
        "response.code_interpreter_call_code.delta"
    )
    output_index: int = 0
    item_id: str = "item_1234"
    delta: str = ""
    # Optional single output (logs or image) attached alongside the delta.
    code_output: Optional[
        Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage]
    ] = None


class ResponseCodeInterpreterCallCodeDone(ResponseEvent):
    """Marks the submitted code as complete, optionally with its outputs."""

    type: Literal["response.code_interpreter_call_code.done"] = (
        "response.code_interpreter_call_code.done"
    )
    output_index: int = 0
    item_id: str = "item_1234"
    code: str = ""
    outputs: Optional[
        list[Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage]]
    ] = None


class ResponseCodeInterpreterCallCompleted(ResponseEvent):
    """Code-interpreter call has finished."""

    type: Literal["response.code_interpreter_call.completed"] = (
        "response.code_interpreter_call.completed"
    )
    output_index: int = 0
    item_id: str = "item_1234"
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/events.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/responses_api/inference/metal.py | """Metal backend for :mod:`gpt_oss.responses_api`."""
from typing import Callable
from gpt_oss.metal import Context, Model
# Tunables
MAX_OUTPUT_TOKENS = 100
def setup_model(checkpoint: str) -> Callable[[list[int], float], int]:
    """Load the Metal model at *checkpoint* and return a next-token function.

    The returned callable samples a batch of up to MAX_OUTPUT_TOKENS tokens
    whenever its internal buffer runs dry, then hands them out one per call.
    """
    model = Model(checkpoint)
    context = Context(model)
    seed = 0
    pending: list[int] = []  # sampled tokens not yet handed to the caller

    def infer_next_token(
        tokens: list[int], temperature: float = 0.0, new_request: bool = False
    ) -> int:
        """Return the next output token for *tokens*."""
        nonlocal pending
        if new_request:
            pending = []
        if not pending:
            # The Context performs longest-common-prefix caching internally:
            # after reset+append, KV state matching `tokens` is reused.
            context.reset()
            for tok in tokens:
                context.append(tok)
            pending = context.sample(
                max_output_tokens=MAX_OUTPUT_TOKENS,
                temperature=temperature,
                seed=seed,
            )
        return int(pending.pop(0))

    return infer_next_token
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/inference/metal.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/responses_api/inference/ollama.py | """
NOTE: this is a stitched together implementation that uses Ollama for inference. It's primarily used
for testing and development. It does not leverage any prompt caching or other optimizations and
can therefore be slow between turns.
"""
import json
import threading
import time
from typing import Callable, Optional
import requests
from openai_harmony import HarmonyEncodingName, load_harmony_encoding
EOS_TOKEN = 200002 # only used on hard timeout
# Tunables
POLL_INTERVAL_S = 0.01 # 10ms between buffer checks
CALL_MAX_WAIT_S = 0.250 # max time to block inside a single infer call
NO_TOKEN_TIMEOUT_S = 15.0 # overall inactivity timeout before emitting EOS
FIRST_BYTE_TIMEOUT_S = 30.0 # time to wait for first token before EOS
# Shared state
_token_buffer: list[int] = []
_buffer_lock = threading.Lock()
_stream_thread: Optional[threading.Thread] = None
_stream_done = threading.Event()
_stream_error: Optional[Exception] = None
_last_progress_ts: float = 0.0 # updated whenever we enqueue or dequeue tokens
_previous_request_tokens: list[int] = []
def lcp(cache: list[int], inp: list[int]) -> list[int]:
    """Return the longest common prefix shared by *cache* and *inp*."""
    matched = 0
    for a, b in zip(cache, inp):
        if a != b:
            break
        matched += 1
    return cache[:matched]
def _now():
    # Monotonic clock so timeout arithmetic is immune to wall-clock changes.
    return time.monotonic()


def _touch_progress():
    """Record that progress happened now, pushing back the inactivity timeout."""
    global _last_progress_ts
    _last_progress_ts = _now()


def _reset_stream_state():
    """Clear all shared streaming state before starting a new request."""
    global _token_buffer, _stream_thread, _stream_error
    with _buffer_lock:
        _token_buffer = []
    _stream_done.clear()
    _stream_thread = None
    _stream_error = None
    _touch_progress()
def setup_model(checkpoint: str) -> Callable[[list[int], float, bool], int]:
    """Build an Ollama-backed incremental token source.

    Returns ``infer_next_token(tokens, temperature, new_request) -> int``. The
    checkpoint name is used directly as the Ollama model name. A background
    thread streams the completion from the local Ollama server into a shared
    buffer; the returned function pops tokens from that buffer.
    """
    encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
    model_name = checkpoint

    def _start_stream(token_ids: list[int], temperature: float):
        # Ollama consumes text, so decode the harmony tokens back to a prompt.
        prompt_text = encoding.decode(token_ids)

        def run():
            global _stream_error
            accum_text = ""
            last_len = 0  # number of tokens already emitted
            try:
                url = "http://localhost:11434/api/generate"
                payload = {
                    "model": model_name,
                    "prompt": prompt_text,
                    "stream": True,
                    "options": {"temperature": temperature},
                    "raw": True,
                }
                with requests.post(url, json=payload, stream=True, timeout=60) as resp:
                    resp.raise_for_status()
                    for line in resp.iter_lines(decode_unicode=True):
                        if not line:
                            continue
                        obj = json.loads(line)
                        if isinstance(obj.get("response"), str):
                            accum_text += obj["response"]
                            # Re-tokenize the accumulated text and forward only
                            # the tokens not yet emitted.
                            toks = encoding.encode(accum_text, allowed_special="all")
                            if len(toks) > last_len:
                                new_toks = toks[last_len:]
                                with _buffer_lock:
                                    _token_buffer.extend(new_toks)
                                last_len = len(toks)
                                _touch_progress()
                        if obj.get("done", False):
                            # Fix: the EOS append previously happened outside
                            # the lock and assigned from `toks`, which is
                            # unbound when "done" arrives before any response
                            # text (NameError). Take the lock; the assignment
                            # was dead anyway because we break immediately.
                            with _buffer_lock:
                                _token_buffer.append(EOS_TOKEN)
                            _touch_progress()
                            break
                _stream_done.set()
            except Exception as e:
                _stream_error = e
                _stream_done.set()

        t = threading.Thread(target=run, name="ollama-stream", daemon=True)
        t.start()
        return t

    def infer_next_token(
        tokens: list[int], temperature: float = 0.0, new_request: bool = False
    ) -> int:
        """
        - Starts a new Ollama stream on new_request.
        - Forwards tokens as they arrive.
        - Only emits EOS_TOKEN if we exceed an inactivity timeout.
        """
        global _stream_thread
        if new_request:
            _reset_stream_state()
            _stream_thread = _start_stream(token_ids=tokens, temperature=temperature)
            # Wait for first byte within FIRST_BYTE_TIMEOUT_S (without emitting EOS early)
            start = _now()
            while _now() - start < FIRST_BYTE_TIMEOUT_S:
                with _buffer_lock:
                    if _token_buffer:
                        tok = _token_buffer.pop(0)
                        _touch_progress()
                        return tok
                if _stream_error is not None:
                    raise RuntimeError(f"Ollama stream error: {_stream_error!r}")
                # If Ollama finished instantly with no output, loop until timeout.
                time.sleep(POLL_INTERVAL_S)
            # Hard first-byte timeout -> emit EOS so the server can stop this request
            return EOS_TOKEN

        if _stream_error is not None:
            raise RuntimeError(f"Ollama stream error: {_stream_error!r}")

        # Normal path: wait up to CALL_MAX_WAIT_S for a token to arrive.
        wait_start = _now()
        while _now() - wait_start < CALL_MAX_WAIT_S:
            with _buffer_lock:
                if _token_buffer:
                    tok = _token_buffer.pop(0)
                    _touch_progress()
                    return tok
            # No token yet; if we've been idle too long overall, end with EOS.
            if _now() - _last_progress_ts > NO_TOKEN_TIMEOUT_S:
                return EOS_TOKEN
            time.sleep(POLL_INTERVAL_S)

        # Still no token in this call slice: give the stream one more short
        # wait, then either hand back a token, time out with EOS, or return
        # the PAD/NOOP sentinel.
        time.sleep(POLL_INTERVAL_S)
        with _buffer_lock:
            if _token_buffer:
                tok = _token_buffer.pop(0)
                _touch_progress()
                return tok
        if _now() - _last_progress_ts > NO_TOKEN_TIMEOUT_S:
            return EOS_TOKEN
        # Not timed out yet: return token id 0 as a PAD/NOOP sentinel that the
        # caller should treat as "call again soon". (This was previously the
        # obfuscated `EOS_TOKEN if False else 0`, which always evaluated to 0.)
        return 0

    return infer_next_token
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/inference/ollama.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/responses_api/inference/triton.py | import datetime
import os
from typing import Callable
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
import torch
import torch.distributed as dist
from gpt_oss.triton.model import Cache, ModelConfig, Transformer
DEFAULT_TEMPERATURE = 0.0
CONTEXT = 16_384
CONCURRENT_SESSIONS = 1
rank = int(
os.environ.get("RANK", 0)
) # set this env var to another value to run on other GPUs
def load_model(checkpoint: str):
    """Load the Transformer checkpoint onto this rank's GPU, inference-only.

    Returns a ``(model, device)`` pair.
    """
    print(f"[{rank}] loading model...")
    device = torch.device(f"cuda:{rank}")
    torch.cuda.set_device(rank)
    torch.set_grad_enabled(False)  # inference only; no autograd bookkeeping
    model = Transformer.from_checkpoint(checkpoint, device=device)
    print(f"[{rank}] loaded")
    return model, device
def get_infer_next_token(model, device):
    """Return an incremental ``infer_next_token`` callable for *model*.

    The single-token decode step is captured once as a CUDA graph and replayed
    on every call. KV caches are reused across calls by truncating them to the
    longest common prefix between the cached and requested token sequences.
    """
    caches = [
        Cache(CONCURRENT_SESSIONS, CONTEXT, model.config.num_key_value_heads)
        for _ in range(len(model.block))
    ]
    input_token = torch.zeros(
        1, dtype=torch.int32, device=device
    )  # add concurrent sessions support
    tokens_so_far = []
    # Warm the caches, then capture the one-token forward pass as a CUDA graph.
    model.prefill(torch.zeros(1, 4, dtype=torch.int32, device=device), caches)
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        logits = model(input_token[None, :], caches=caches)[0]

    def lcp(cache: list[int], inp: list[int]) -> list[int]:
        """Longest common prefix of two token lists."""
        i = 0
        max_len = min(len(cache), len(inp))
        while i < max_len and cache[i] == inp[i]:
            i += 1
        return cache[:i]

    def sample_next_token(
        logits: torch.Tensor, temperature: float = DEFAULT_TEMPERATURE
    ) -> int:
        """Executed only on rank 0. Greedy at temperature 0, else multinomial."""
        if temperature == 0.0:
            return torch.argmax(logits[-1, :], dim=-1).item()
        probs = torch.softmax(logits * (1.0 / temperature), dim=-1)
        return torch.multinomial(probs[-1, :], num_samples=1).item()

    @torch.inference_mode()
    def infer_next_token(
        tokens: list[int],
        temperature: float = DEFAULT_TEMPERATURE,
        new_request: bool = False,
    ) -> int:
        nonlocal tokens_so_far
        # Reuse whatever prefix of the KV cache still matches this request.
        tokens_so_far = lcp(tokens_so_far, tokens)
        for cache in caches:
            cache.truncate(len(tokens_so_far))
        new_tokens = tokens[len(tokens_so_far):]
        if not new_tokens:
            # Fix: this case previously hit a bare `breakpoint()` (debug
            # leftover) and would then IndexError on tokens[-1]; fail loudly
            # with a diagnosable error instead of hanging the server.
            raise ValueError(
                "infer_next_token requires at least one token beyond the cached prefix"
            )
        if len(new_tokens) > 1:
            # Prefill everything except the final token; the graph consumes it.
            model.prefill(
                torch.as_tensor(new_tokens[:-1], dtype=torch.int32, device=device)[None, :],
                caches,
            )
        input_token[-1] = new_tokens[-1]
        graph.replay()
        # decide next token on rank-0
        next_tok = sample_next_token(logits, temperature=temperature)
        return next_tok

    return infer_next_token
def setup_model(checkpoint: str) -> Callable[[list[int], float], int]:
    """Load *checkpoint* and return its incremental next-token function."""
    model, device = load_model(checkpoint)
    return get_infer_next_token(model, device)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/inference/triton.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/responses_api/inference/vllm.py | """
NOTE: this is not the most efficient way to use vLLM. It's a simple implementation that infers
one token at a time to mimic the behavior of the Triton implementation.
"""
import os
from typing import Callable, List, Optional
# vLLM imports
from vllm import LLM, SamplingParams
from vllm.inputs import TokensPrompt
# Sampling temperature used when the caller does not supply one.
DEFAULT_TEMPERATURE = 0.0
# Tensor-parallel degree. Environment variables arrive as strings, so coerce
# to int — previously a set TP env var leaked a str into tensor_parallel_size.
TP = int(os.environ.get("TP", 2))
def load_model(checkpoint: str):
    """
    Create the vLLM engine. Prefix caching is enabled so repeated prefixes
    across calls can reuse KV cache for faster prefill.
    """
    engine_options = {
        "model": checkpoint,
        "tensor_parallel_size": TP,  # set >1 for tensor parallelism across GPUs
        "enable_prefix_caching": True,  # reuse KV for shared prefixes
        "disable_log_stats": True,  # keep engine logging quiet
    }
    return LLM(**engine_options)
def get_infer_next_token(llm: LLM):
    """Wrap *llm* in a single-token inference callable.

    The returned function keeps the legacy interface
    ``infer_next_token(tokens, temperature, new_request) -> int``: each call
    asks vLLM for exactly one continuation token of ``tokens``. With prefix
    caching enabled in the engine, the shared-prefix prefill is reused across
    calls, so the per-call cost stays close to a single decode step.
    """

    def infer_next_token(
        tokens: List[int],
        temperature: float = DEFAULT_TEMPERATURE,
        new_request: bool = False,  # kept for interface compatibility; unused here
    ) -> int:
        if not tokens:
            raise ValueError("tokens must contain at least one input token id")

        # One continuation of exactly one token; temperature 0 means greedy.
        params = SamplingParams(
            temperature=float(temperature),
            max_tokens=1,
            n=1,
        )
        # Pass token IDs directly so nothing gets re-tokenized.
        request_outputs = llm.generate(
            TokensPrompt(prompt_token_ids=tokens),
            sampling_params=params,
        )
        if not request_outputs or not request_outputs[0].outputs:
            raise RuntimeError("vLLM returned empty outputs")
        completion = request_outputs[0].outputs[0]
        if not completion.token_ids:
            # The model may have finished immediately (e.g. EOS) — surface it.
            raise RuntimeError("No next token was generated (possibly EOS).")
        return int(completion.token_ids[0])

    return infer_next_token
def setup_model(checkpoint: str) -> Callable[[List[int], float, bool], int]:
    """Load *checkpoint* into vLLM and return the next-token function."""
    engine = load_model(checkpoint)
    return get_infer_next_token(engine)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/inference/vllm.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/responses_api/serve.py | # torchrun --nproc-per-node=4 serve.py
import argparse
import uvicorn
from openai_harmony import (
HarmonyEncodingName,
load_harmony_encoding,
)
from .api_server import create_api_server
if __name__ == "__main__":
    # Default backend: Metal on macOS, Triton everywhere else.
    _default_backend = (
        "metal" if __import__("platform").system() == "Darwin" else "triton"
    )

    parser = argparse.ArgumentParser(description="Responses API server")
    parser.add_argument(
        "--checkpoint",
        metavar="FILE",
        type=str,
        help="Path to the SafeTensors checkpoint",
        default="~/model",
        required=False,
    )
    parser.add_argument(
        "--port",
        metavar="PORT",
        type=int,
        default=8000,
        help="Port to run the server on",
    )
    parser.add_argument(
        "--inference-backend",
        metavar="BACKEND",
        type=str,
        help="Inference backend to use",
        default=_default_backend,
    )
    args = parser.parse_args()

    # Import the backend lazily so only the selected one's deps are required.
    backend = args.inference_backend
    if backend == "triton":
        from .inference.triton import setup_model
    elif backend == "stub":
        from .inference.stub import setup_model
    elif backend == "metal":
        from .inference.metal import setup_model
    elif backend == "ollama":
        from .inference.ollama import setup_model
    elif backend == "vllm":
        from .inference.vllm import setup_model
    elif backend == "transformers":
        from .inference.transformers import setup_model
    else:
        raise ValueError(f"Invalid inference backend: {args.inference_backend}")

    encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
    infer_next_token = setup_model(args.checkpoint)
    uvicorn.run(create_api_server(infer_next_token, encoding), port=args.port)
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/serve.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/responses_api/types.py | from typing import Any, Dict, Literal, Optional, Union
from openai_harmony import ReasoningEffort
from pydantic import BaseModel, ConfigDict
# Model name advertised in response objects.
MODEL_IDENTIFIER = "gpt-oss-120b"
# Sampling temperature applied when a request does not specify one.
DEFAULT_TEMPERATURE = 0.0
# Reasoning effort used when the request omits `reasoning`.
REASONING_EFFORT = ReasoningEffort.LOW
# Default cap on generated tokens (128 * 1024).
DEFAULT_MAX_OUTPUT_TOKENS = 131072
class UrlCitation(BaseModel):
    """A citation span within output text, pointing at a source URL."""

    type: Literal["url_citation"]
    end_index: int
    start_index: int
    url: str
    title: str


class TextContentItem(BaseModel):
    """Plain text content; `type` distinguishes input vs output text."""

    type: Union[Literal["text"], Literal["input_text"], Literal["output_text"]]
    text: str
    status: Optional[str] = "completed"
    annotations: Optional[list[UrlCitation]] = None


class SummaryTextContentItem(BaseModel):
    # using summary for compatibility with the existing API
    type: Literal["summary_text"]
    text: str


class ReasoningTextContentItem(BaseModel):
    """A chunk of raw reasoning text."""

    type: Literal["reasoning_text"]
    text: str


class ReasoningItem(BaseModel):
    """Output item carrying reasoning summary and optional full text."""

    id: str = "rs_1234"
    type: Literal["reasoning"]
    summary: list[SummaryTextContentItem]
    content: Optional[list[ReasoningTextContentItem]] = []


class Item(BaseModel):
    """A conversation message item (user, assistant, or system)."""

    id: Optional[str] = None
    type: Optional[Literal["message"]] = "message"
    role: Literal["user", "assistant", "system"]
    content: Union[list[TextContentItem], str]
    status: Union[Literal["in_progress", "completed", "incomplete"], None] = None


class FunctionCallItem(BaseModel):
    """An assistant-initiated function (tool) call with raw JSON arguments."""

    type: Literal["function_call"]
    name: str
    arguments: str
    status: Literal["in_progress", "completed", "incomplete"] = "completed"
    id: str = "fc_1234"
    call_id: str = "call_1234"


class FunctionCallOutputItem(BaseModel):
    """The caller-supplied result of a prior function call."""

    type: Literal["function_call_output"]
    call_id: str = "call_1234"
    output: str


class WebSearchActionSearch(BaseModel):
    """Web-search action: run a search query."""

    type: Literal["search"]
    query: Optional[str] = None


class WebSearchActionOpenPage(BaseModel):
    """Web-search action: open a page by URL."""

    type: Literal["open_page"]
    url: Optional[str] = None


class WebSearchActionFind(BaseModel):
    """Web-search action: find a pattern within a page."""

    type: Literal["find"]
    pattern: Optional[str] = None
    url: Optional[str] = None


class WebSearchCallItem(BaseModel):
    """Output item describing one web-search tool invocation."""

    type: Literal["web_search_call"]
    id: str = "ws_1234"
    status: Literal["in_progress", "completed", "incomplete"] = "completed"
    action: Union[WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind]


class CodeInterpreterOutputLogs(BaseModel):
    """Textual (log) output of a code-interpreter run."""

    type: Literal["logs"]
    logs: str


class CodeInterpreterOutputImage(BaseModel):
    """Image output of a code-interpreter run, referenced by URL."""

    type: Literal["image"]
    url: str


class CodeInterpreterCallItem(BaseModel):
    """Output item describing one code-interpreter invocation."""

    type: Literal["code_interpreter_call"]
    id: str = "ci_1234"
    status: Literal[
        "in_progress",
        "completed",
        "incomplete",
        "interpreting",
        "failed",
    ] = "completed"
    code: Optional[str] = None
    container_id: Optional[str] = None
    outputs: Optional[
        list[Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage]]
    ] = None
class Error(BaseModel):
    """Terminal error attached to a failed response."""

    code: str
    message: str


class IncompleteDetails(BaseModel):
    """Why a response ended incomplete (e.g. hit the token limit)."""

    reason: str


class Usage(BaseModel):
    """Token accounting for a completed response."""

    input_tokens: int
    output_tokens: int
    total_tokens: int


class FunctionToolDefinition(BaseModel):
    """A caller-declared function tool the model may invoke."""

    type: Literal["function"]
    name: str
    parameters: dict  # this should be typed stricter if you add strict mode
    strict: bool = False  # change this if you support strict mode
    description: Optional[str] = ""


class BrowserToolConfig(BaseModel):
    """Enables the built-in browser / web-search tool."""

    model_config = ConfigDict(extra='allow')
    type: Literal["browser_search"] | Literal["web_search"]


class CodeInterpreterToolConfig(BaseModel):
    """Enables the built-in code-interpreter tool."""

    type: Literal["code_interpreter"]


class ReasoningConfig(BaseModel):
    """Requested reasoning effort level."""

    # NOTE(review): the default is a ReasoningEffort enum member while the
    # field is a str Literal — verify pydantic coerces it as intended.
    effort: Literal["low", "medium", "high"] = REASONING_EFFORT


class ResponsesRequest(BaseModel):
    """Request body accepted by POST /v1/responses."""

    instructions: Optional[str] = None
    max_output_tokens: Optional[int] = DEFAULT_MAX_OUTPUT_TOKENS
    # Either a raw user prompt string or a structured list of items.
    input: Union[
        str,
        list[
            Union[
                Item,
                ReasoningItem,
                FunctionCallItem,
                FunctionCallOutputItem,
                WebSearchCallItem,
                CodeInterpreterCallItem,
            ]
        ],
    ]
    model: Optional[str] = MODEL_IDENTIFIER
    stream: Optional[bool] = False
    tools: Optional[
        list[
            Union[FunctionToolDefinition, BrowserToolConfig, CodeInterpreterToolConfig]
        ]
    ] = []
    reasoning: Optional[ReasoningConfig] = ReasoningConfig()
    metadata: Optional[Dict[str, Any]] = {}
    tool_choice: Optional[Literal["auto", "none"]] = "auto"
    parallel_tool_calls: Optional[bool] = False
    store: Optional[bool] = False
    # When set, the stored prior exchange is prepended to this request's input.
    previous_response_id: Optional[str] = None
    temperature: Optional[float] = DEFAULT_TEMPERATURE
    include: Optional[list[str]] = None
class ResponseObject(BaseModel):
    """Response body returned by the responses endpoint."""
    # Ordered list of items produced by the model for this turn.
    output: list[
        Union[
            Item,
            ReasoningItem,
            FunctionCallItem,
            FunctionCallOutputItem,
            WebSearchCallItem,
            CodeInterpreterCallItem,
        ]
    ]
    created_at: int
    usage: Optional[Usage] = None
    status: Literal["completed", "failed", "incomplete", "in_progress"] = "in_progress"
    background: None = None
    error: Optional[Error] = None
    incomplete_details: Optional[IncompleteDetails] = None
    instructions: Optional[str] = None
    max_output_tokens: Optional[int] = None
    max_tool_calls: Optional[int] = None
    metadata: Optional[Dict[str, Any]] = {}
    model: Optional[str] = MODEL_IDENTIFIER
    parallel_tool_calls: Optional[bool] = False
    previous_response_id: Optional[str] = None
    # Placeholder id; presumably replaced with a real id when served — verify.
    id: Optional[str] = "resp_1234"
    object: Optional[str] = "response"
    text: Optional[Dict[str, Any]] = None
    tool_choice: Optional[str] = "auto"
    top_p: Optional[int] = 1
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/responses_api/types.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/tokenizer.py | import tiktoken
def get_tokenizer():
    """Build the ``o200k_harmony`` tokenizer.

    Starts from tiktoken's ``o200k_base`` encoding and layers Harmony's
    control tokens (``<|start|>``, ``<|channel|>``, ``<|return|>``, ...)
    on top, padding the rest of the id range with reserved placeholders.
    """
    base = tiktoken.get_encoding("o200k_base")
    harmony_tokens = {
        "<|startoftext|>": 199998,
        "<|endoftext|>": 199999,
        "<|reserved_200000|>": 200000,
        "<|reserved_200001|>": 200001,
        "<|return|>": 200002,
        "<|constrain|>": 200003,
        "<|reserved_200004|>": 200004,
        "<|channel|>": 200005,
        "<|start|>": 200006,
        "<|end|>": 200007,
        "<|message|>": 200008,
        "<|reserved_200009|>": 200009,
        "<|reserved_200010|>": 200010,
        "<|reserved_200011|>": 200011,
        "<|call|>": 200012,
    }
    # Fill the remaining id space with reserved placeholder tokens.
    for token_id in range(200013, 201088):
        harmony_tokens[f"<|reserved_{token_id}|>"] = token_id
    return tiktoken.Encoding(
        name="o200k_harmony",
        pat_str=base._pat_str,
        mergeable_ranks=base._mergeable_ranks,
        special_tokens={**base._special_tokens, **harmony_tokens},
    )
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tokenizer.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/tools/apply_patch.py | #!/usr/bin/env python3
"""
A self-contained **pure-Python 3.9+** utility for applying human-readable
“pseudo-diff” patch files to a collection of text files.
Source: https://cookbook.openai.com/examples/gpt4-1_prompting_guide
"""
from __future__ import annotations
import pathlib
from dataclasses import dataclass, field
from enum import Enum
from typing import (
Callable,
Dict,
List,
Optional,
Tuple,
Union,
)
# --------------------------------------------------------------------------- #
# Domain objects
# --------------------------------------------------------------------------- #
class ActionType(str, Enum):
    """Kinds of file operations a patch section can request."""
    ADD = "add"
    DELETE = "delete"
    UPDATE = "update"
@dataclass
class FileChange:
    """Resolved change for one file: old/new contents plus an optional move."""
    type: ActionType
    # Pre-change contents (unset for ADD).
    old_content: Optional[str] = None
    # Post-change contents (unset for DELETE).
    new_content: Optional[str] = None
    # Destination path when an UPDATE also moves the file.
    move_path: Optional[str] = None
@dataclass
class Commit:
    """Full set of resolved file changes, keyed by original path."""
    changes: Dict[str, FileChange] = field(default_factory=dict)
# --------------------------------------------------------------------------- #
# Exceptions
# --------------------------------------------------------------------------- #
# Single exception type raised for every parse/apply failure in this module.
class DiffError(ValueError):
    """Any problem detected while parsing or applying a patch."""
# --------------------------------------------------------------------------- #
# Helper dataclasses used while parsing patches
# --------------------------------------------------------------------------- #
@dataclass
class Chunk:
    """One hunk within an update: deletions and insertions at orig_index."""
    # Line offset in the original file; section-relative while parsing, then
    # rebased to an absolute position by Parser._parse_update_file.
    orig_index: int = -1
    del_lines: List[str] = field(default_factory=list)
    ins_lines: List[str] = field(default_factory=list)
@dataclass
class PatchAction:
    """Parsed patch section for a single file."""
    type: ActionType
    # Complete new-file body for ADD actions.
    new_file: Optional[str] = None
    # Hunks for UPDATE actions.
    chunks: List[Chunk] = field(default_factory=list)
    # Optional rename target for UPDATE actions.
    move_path: Optional[str] = None
@dataclass
class Patch:
    """All parsed actions of one patch, keyed by file path."""
    actions: Dict[str, PatchAction] = field(default_factory=dict)
# --------------------------------------------------------------------------- #
# Patch text parser
# --------------------------------------------------------------------------- #
@dataclass
class Parser:
    """Stateful cursor that translates pseudo-diff text into a Patch.
    ``fuzz`` accumulates how much whitespace slack was needed to match
    patch context against the files in ``current_files``.
    """
    # Map of path -> current file contents, used to validate update/delete/add.
    current_files: Dict[str, str]
    # Raw patch lines being consumed.
    lines: List[str]
    # Cursor into ``lines``.
    index: int = 0
    patch: Patch = field(default_factory=Patch)
    # Whitespace-tolerance penalty accumulated while matching context.
    fuzz: int = 0
    # ------------- low-level helpers -------------------------------------- #
    def _cur_line(self) -> str:
        # Raises instead of returning a sentinel so callers may assume a line.
        if self.index >= len(self.lines):
            raise DiffError("Unexpected end of input while parsing patch")
        return self.lines[self.index]
    @staticmethod
    def _norm(line: str) -> str:
        """Strip CR so comparisons work for both LF and CRLF input."""
        return line.rstrip("\r")
    # ------------- scanning convenience ----------------------------------- #
    def is_done(self, prefixes: Optional[Tuple[str, ...]] = None) -> bool:
        # True at end of input, or when the current line starts one of the
        # given section markers.
        if self.index >= len(self.lines):
            return True
        if (
            prefixes
            and len(prefixes) > 0
            and self._norm(self._cur_line()).startswith(prefixes)
        ):
            return True
        return False
    def startswith(self, prefix: Union[str, Tuple[str, ...]]) -> bool:
        return self._norm(self._cur_line()).startswith(prefix)
    def read_str(self, prefix: str) -> str:
        """
        Consume the current line if it starts with *prefix* and return the text
        **after** the prefix. Raises if prefix is empty.
        """
        if prefix == "":
            raise ValueError("read_str() requires a non-empty prefix")
        if self._norm(self._cur_line()).startswith(prefix):
            text = self._cur_line()[len(prefix) :]
            self.index += 1
            return text
        return ""
    def read_line(self) -> str:
        """Return the current raw line and advance."""
        line = self._cur_line()
        self.index += 1
        return line
    # ------------- public entry point -------------------------------------- #
    def parse(self) -> None:
        """Parse every section up to the ``*** End Patch`` sentinel."""
        while not self.is_done(("*** End Patch",)):
            # ---------- UPDATE ---------- #
            path = self.read_str("*** Update File: ")
            if path:
                if path in self.patch.actions:
                    raise DiffError(f"Duplicate update for file: {path}")
                # An optional move target may directly follow the header.
                move_to = self.read_str("*** Move to: ")
                if path not in self.current_files:
                    raise DiffError(f"Update File Error - missing file: {path}")
                text = self.current_files[path]
                action = self._parse_update_file(text)
                action.move_path = move_to or None
                self.patch.actions[path] = action
                continue
            # ---------- DELETE ---------- #
            path = self.read_str("*** Delete File: ")
            if path:
                if path in self.patch.actions:
                    raise DiffError(f"Duplicate delete for file: {path}")
                if path not in self.current_files:
                    raise DiffError(f"Delete File Error - missing file: {path}")
                self.patch.actions[path] = PatchAction(type=ActionType.DELETE)
                continue
            # ---------- ADD ---------- #
            path = self.read_str("*** Add File: ")
            if path:
                if path in self.patch.actions:
                    raise DiffError(f"Duplicate add for file: {path}")
                if path in self.current_files:
                    raise DiffError(f"Add File Error - file already exists: {path}")
                self.patch.actions[path] = self._parse_add_file()
                continue
            raise DiffError(f"Unknown line while parsing: {self._cur_line()}")
        if not self.startswith("*** End Patch"):
            raise DiffError("Missing *** End Patch sentinel")
        self.index += 1 # consume sentinel
    # ------------- section parsers ---------------------------------------- #
    def _parse_update_file(self, text: str) -> PatchAction:
        """Parse one ``*** Update File`` section against file contents *text*."""
        action = PatchAction(type=ActionType.UPDATE)
        lines = text.split("\n")
        index = 0
        while not self.is_done(
            (
                "*** End Patch",
                "*** Update File:",
                "*** Delete File:",
                "*** Add File:",
                "*** End of File",
            )
        ):
            def_str = self.read_str("@@ ")
            section_str = ""
            if not def_str and self._norm(self._cur_line()) == "@@":
                section_str = self.read_line()
            if not (def_str or section_str or index == 0):
                raise DiffError(f"Invalid line in update section:\n{self._cur_line()}")
            if def_str.strip():
                # Advance ``index`` past the @@ anchor: exact match first,
                # then whitespace-insensitive (which costs one fuzz point).
                found = False
                if def_str not in lines[:index]:
                    for i, s in enumerate(lines[index:], index):
                        if s == def_str:
                            index = i + 1
                            found = True
                            break
                if not found and def_str.strip() not in [
                    s.strip() for s in lines[:index]
                ]:
                    for i, s in enumerate(lines[index:], index):
                        if s.strip() == def_str.strip():
                            index = i + 1
                            self.fuzz += 1
                            found = True
                            break
            next_ctx, chunks, end_idx, eof = peek_next_section(self.lines, self.index)
            new_index, fuzz = find_context(lines, next_ctx, index, eof)
            if new_index == -1:
                ctx_txt = "\n".join(next_ctx)
                raise DiffError(
                    f"Invalid {'EOF ' if eof else ''}context at {index}:\n{ctx_txt}"
                )
            self.fuzz += fuzz
            # Rebase section-relative chunk offsets onto absolute positions.
            for ch in chunks:
                ch.orig_index += new_index
                action.chunks.append(ch)
            index = new_index + len(next_ctx)
            self.index = end_idx
        return action
    def _parse_add_file(self) -> PatchAction:
        """Collect the '+'-prefixed body of an ``*** Add File`` section."""
        lines: List[str] = []
        while not self.is_done(
            ("*** End Patch", "*** Update File:", "*** Delete File:", "*** Add File:")
        ):
            s = self.read_line()
            if not s.startswith("+"):
                raise DiffError(f"Invalid Add File line (missing '+'): {s}")
            lines.append(s[1:]) # strip leading '+'
        return PatchAction(type=ActionType.ADD, new_file="\n".join(lines))
# --------------------------------------------------------------------------- #
# Helper functions
# --------------------------------------------------------------------------- #
def find_context_core(
    lines: List[str], context: List[str], start: int
) -> Tuple[int, int]:
    """Locate *context* as a contiguous run in *lines* at or after *start*.

    Returns ``(index, fuzz)``: an exact match costs 0 fuzz, a match that
    ignores trailing whitespace costs 1, and a match that ignores leading
    and trailing whitespace costs 100.  ``(-1, 0)`` means not found.
    """
    if not context:
        return start, 0
    # Run progressively looser comparison passes, each over the whole range,
    # so an exact match anywhere beats an earlier fuzzy match.
    passes = (
        (lambda s: s, 0),
        (lambda s: s.rstrip(), 1),
        (lambda s: s.strip(), 100),
    )
    width = len(context)
    for normalize, fuzz_cost in passes:
        wanted = [normalize(s) for s in context]
        for pos in range(start, len(lines)):
            if [normalize(s) for s in lines[pos : pos + width]] == wanted:
                return pos, fuzz_cost
    return -1, 0
def find_context(
    lines: List[str], context: List[str], start: int, eof: bool
) -> Tuple[int, int]:
    """Find *context*, preferring an end-of-file anchor when *eof* is set.

    For EOF sections the context is first matched flush against the end of
    the file; a fallback scan from *start* is still accepted but penalised
    with +10_000 fuzz so callers can tell the anchor did not hold.
    """
    if not eof:
        return find_context_core(lines, context, start)
    anchored_start = len(lines) - len(context)
    index, fuzz = find_context_core(lines, context, anchored_start)
    if index != -1:
        return index, fuzz
    index, fuzz = find_context_core(lines, context, start)
    return index, fuzz + 10_000
def peek_next_section(
    lines: List[str], index: int
) -> Tuple[List[str], List[Chunk], int, bool]:
    """Read one diff section of an update starting at *index*.
    Returns ``(context, chunks, next_index, is_eof)`` where *context* holds
    the original-file lines (kept + deleted), each chunk's ``orig_index`` is
    relative to the start of this section, and *is_eof* reports whether an
    ``*** End of File`` marker terminated the section.
    """
    old: List[str] = []
    del_lines: List[str] = []
    ins_lines: List[str] = []
    chunks: List[Chunk] = []
    # Simple mode machine: runs of add/delete lines become one Chunk when a
    # keep line (or the end of the section) closes them.
    mode = "keep"
    orig_index = index
    while index < len(lines):
        s = lines[index]
        if s.startswith(
            (
                "@@",
                "*** End Patch",
                "*** Update File:",
                "*** Delete File:",
                "*** Add File:",
                "*** End of File",
            )
        ):
            break
        if s == "***":
            break
        if s.startswith("***"):
            raise DiffError(f"Invalid Line: {s}")
        index += 1
        last_mode = mode
        # A completely blank line counts as an (empty) keep line.
        if s == "":
            s = " "
        if s[0] == "+":
            mode = "add"
        elif s[0] == "-":
            mode = "delete"
        elif s[0] == " ":
            mode = "keep"
        else:
            raise DiffError(f"Invalid Line: {s}")
        s = s[1:]
        if mode == "keep" and last_mode != mode:
            # Transition back to context: flush the pending chunk, anchoring
            # it at the position where its deletions began.
            if ins_lines or del_lines:
                chunks.append(
                    Chunk(
                        orig_index=len(old) - len(del_lines),
                        del_lines=del_lines,
                        ins_lines=ins_lines,
                    )
                )
            del_lines, ins_lines = [], []
        if mode == "delete":
            del_lines.append(s)
            old.append(s)
        elif mode == "add":
            ins_lines.append(s)
        elif mode == "keep":
            old.append(s)
    # Flush a chunk still open at the end of the section.
    if ins_lines or del_lines:
        chunks.append(
            Chunk(
                orig_index=len(old) - len(del_lines),
                del_lines=del_lines,
                ins_lines=ins_lines,
            )
        )
    if index < len(lines) and lines[index] == "*** End of File":
        index += 1
        return old, chunks, index, True
    if index == orig_index:
        raise DiffError("Nothing in this section")
    return old, chunks, index, False
# --------------------------------------------------------------------------- #
# Patch → Commit and Commit application
# --------------------------------------------------------------------------- #
def _get_updated_file(text: str, action: PatchAction, path: str) -> str:
    """Apply an UPDATE action's chunks to *text* and return the new contents.
    Chunk offsets must be ascending and within the file; *path* is used only
    for readable error messages.
    """
    if action.type is not ActionType.UPDATE:
        raise DiffError("_get_updated_file called with non-update action")
    orig_lines = text.split("\n")
    dest_lines: List[str] = []
    orig_index = 0
    for chunk in action.chunks:
        if chunk.orig_index > len(orig_lines):
            raise DiffError(
                f"{path}: chunk.orig_index {chunk.orig_index} exceeds file length"
            )
        if orig_index > chunk.orig_index:
            raise DiffError(
                f"{path}: overlapping chunks at {orig_index} > {chunk.orig_index}"
            )
        # Copy untouched lines up to the chunk, emit its insertions, then
        # skip the deleted lines in the original.
        dest_lines.extend(orig_lines[orig_index : chunk.orig_index])
        orig_index = chunk.orig_index
        dest_lines.extend(chunk.ins_lines)
        orig_index += len(chunk.del_lines)
    dest_lines.extend(orig_lines[orig_index:])
    return "\n".join(dest_lines)
def patch_to_commit(patch: Patch, orig: Dict[str, str]) -> Commit:
    """Resolve a parsed *patch* against original contents into a Commit.
    Raises DiffError for an ADD action that carries no file body; UPDATE
    actions are materialised via _get_updated_file.
    """
    commit = Commit()
    for path, action in patch.actions.items():
        if action.type is ActionType.DELETE:
            commit.changes[path] = FileChange(
                type=ActionType.DELETE, old_content=orig[path]
            )
        elif action.type is ActionType.ADD:
            if action.new_file is None:
                raise DiffError("ADD action without file content")
            commit.changes[path] = FileChange(
                type=ActionType.ADD, new_content=action.new_file
            )
        elif action.type is ActionType.UPDATE:
            new_content = _get_updated_file(orig[path], action, path)
            commit.changes[path] = FileChange(
                type=ActionType.UPDATE,
                old_content=orig[path],
                new_content=new_content,
                move_path=action.move_path,
            )
    return commit
# --------------------------------------------------------------------------- #
# User-facing helpers
# --------------------------------------------------------------------------- #
def text_to_patch(text: str, orig: Dict[str, str]) -> Tuple[Patch, int]:
    """Parse raw patch *text* against the original file contents *orig*.

    Returns the parsed Patch together with the accumulated fuzz score.
    Raises DiffError when the Begin/End sentinels are missing.
    """
    lines = text.splitlines()  # preserves blank lines, no strip()
    has_sentinels = (
        len(lines) >= 2
        and Parser._norm(lines[0]).startswith("*** Begin Patch")
        and Parser._norm(lines[-1]) == "*** End Patch"
    )
    if not has_sentinels:
        raise DiffError("Invalid patch text - missing sentinels")
    # Start at index 1 to skip the "*** Begin Patch" line.
    parser = Parser(current_files=orig, lines=lines, index=1)
    parser.parse()
    return parser.patch, parser.fuzz
def identify_files_needed(text: str) -> List[str]:
    """Paths the patch expects to exist already: update targets, then deletes."""
    def _targets(marker: str) -> List[str]:
        # One-line purpose: collect the path after every occurrence of marker.
        return [
            line[len(marker) :]
            for line in text.splitlines()
            if line.startswith(marker)
        ]
    return _targets("*** Update File: ") + _targets("*** Delete File: ")
def identify_files_added(text: str) -> List[str]:
    """Paths the patch creates via ``*** Add File:`` sections, in order."""
    prefix = "*** Add File: "
    return [
        line.removeprefix(prefix)
        for line in text.splitlines()
        if line.startswith(prefix)
    ]
# --------------------------------------------------------------------------- #
# File-system helpers
# --------------------------------------------------------------------------- #
def load_files(paths: List[str], open_fn: Callable[[str], str]) -> Dict[str, str]:
    """Read every path via *open_fn*, preserving the given order."""
    contents: Dict[str, str] = {}
    for path in paths:
        contents[path] = open_fn(path)
    return contents
def apply_commit(
    commit: Commit,
    write_fn: Callable[[str, str], None],
    remove_fn: Callable[[str], None],
) -> None:
    """Materialise *commit* on disk through the write/remove callbacks."""
    for path, change in commit.changes.items():
        kind = change.type
        if kind is ActionType.DELETE:
            remove_fn(path)
            continue
        if kind is ActionType.ADD:
            if change.new_content is None:
                raise DiffError(f"ADD change for {path} has no content")
            write_fn(path, change.new_content)
            continue
        if kind is ActionType.UPDATE:
            if change.new_content is None:
                raise DiffError(f"UPDATE change for {path} has no new content")
            # An update may also move the file: write at the new location
            # and drop the original afterwards.
            destination = change.move_path or path
            write_fn(destination, change.new_content)
            if change.move_path:
                remove_fn(path)
def open_file(path: str) -> str:
    """Read a text file as UTF-8 and return its full contents."""
    return pathlib.Path(path).read_text(encoding="utf-8")
def write_file(path: str, content: str) -> None:
    """Write *content* to *path* as UTF-8, creating parent directories."""
    destination = pathlib.Path(path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(content, encoding="utf-8")
def remove_file(path: str) -> None:
    """Delete *path*; a file that is already gone is not an error."""
    target = pathlib.Path(path)
    target.unlink(missing_ok=True)
def apply_patch(
    text: str,
    open_fn: Callable[[str], str] = open_file,
    write_fn: Callable[[str, str], None] = write_file,
    remove_fn: Callable[[str], None] = remove_file,
) -> str:
    """Parse patch *text* and apply it through the file-system callbacks.

    Returns "Done!" on success; raises DiffError on any parse or apply
    problem (including a missing "*** Begin Patch" header).
    """
    if not text.startswith("*** Begin Patch"):
        raise DiffError("Patch text must start with *** Begin Patch")
    originals = load_files(identify_files_needed(text), open_fn)
    patch, _fuzz = text_to_patch(text, originals)
    apply_commit(patch_to_commit(patch, originals), write_fn, remove_fn)
    return "Done!"
def main() -> None:
    """CLI entry point: read patch text from stdin and apply it in place."""
    import sys
    patch_text = sys.stdin.read()
    if not patch_text:
        print("Please pass patch text through stdin", file=sys.stderr)
        return
    try:
        print(apply_patch(patch_text))
    except DiffError as exc:
        # Report patch problems on stderr without a traceback.
        print(exc, file=sys.stderr)
# Allow running as a standalone script: apply a patch piped via stdin.
if __name__ == "__main__":
    main()
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tools/apply_patch.py",
"license": "Apache License 2.0",
"lines": 446,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/tools/python_docker/docker_tool.py | # Run this before running the tool:
# $ docker image pull python:3.11
import asyncio
import contextlib
import io
import os
import queue
import subprocess
import tarfile
import tempfile
from pathlib import Path
from typing import Any, AsyncIterator
import docker
from openai_harmony import (
Author,
Content,
Message,
Role,
TextContent,
ToolNamespaceConfig,
)
from ..tool import Tool
# Lazily-created Docker client shared across calls (see call_python_script).
_docker_client = None
# Execution backends accepted via the PYTHON_EXECUTION_BACKEND env var.
VALID_EXECUTION_BACKENDS = {
    "docker",
    "dangerously_use_uv",
    "dangerously_use_local_jupyter",
}
_default_backend = os.environ.get("PYTHON_EXECUTION_BACKEND", "docker")
# Unknown values silently fall back to the sandboxed docker backend.
if _default_backend not in VALID_EXECUTION_BACKENDS:
    _default_backend = "docker"
PYTHON_EXECUTION_BACKEND = _default_backend
def call_python_script(script: str) -> str:
    """
    Call a python script by writing it to a file in the container and executing it.

    Lazily creates a shared Docker client, pulls python:3.11 on first use,
    and always removes the container (even on failure). Returns the combined
    stdout/stderr of the script, or a warning when there was no output.
    """
    global _docker_client
    if _docker_client is None:
        _docker_client = docker.from_env()
    # pull image `python:3.11` if not present
    try:
        _docker_client.images.get("python:3.11")
    except docker.errors.ImageNotFound:
        _docker_client.images.pull("python:3.11")
    # 1. Create a temporary tar archive containing the script
    script_name = "script.py"
    tarstream = io.BytesIO()
    with tarfile.open(fileobj=tarstream, mode="w") as tar:
        script_bytes = script.encode("utf-8")
        tarinfo = tarfile.TarInfo(name=script_name)
        tarinfo.size = len(script_bytes)
        tar.addfile(tarinfo, io.BytesIO(script_bytes))
    tarstream.seek(0)
    # 2. Start the container
    container = _docker_client.containers.create(
        "python:3.11", command="sleep infinity", detach=True
    )
    try:
        container.start()
        # 3. Put the script into the container
        container.put_archive(path="/tmp", data=tarstream.read())
        # 4. Execute the script
        exec_result = container.exec_run(f"python /tmp/{script_name}")
        output = exec_result.output.decode("utf-8")
        if not output.strip():
            output = "[WARN] No output available. Use print() to output anything to stdout to receive the output"
    finally:
        container.remove(force=True)
    return output
def call_python_script_with_uv(script: str) -> str:
    """
    Call a python script by writing it to a file to a temporary directory
    and executing it with uv.

    Returns stdout on success and stderr on a non-zero exit status.
    """
    with tempfile.TemporaryDirectory() as workdir:
        script_path = os.path.join(workdir, "script.py")
        with open(script_path, "w") as handle:
            handle.write(script)
        completed = subprocess.run(
            ["uv", "run", "--no-project", "python", script_path],
            capture_output=True,
        )
        if completed.returncode == 0:
            return completed.stdout.decode("utf-8")
        return completed.stderr.decode("utf-8")
class LocalJupyterSession:
    """Stateful helper that proxies execution through a local Jupyter kernel.
    Either attaches to an existing kernel via *connection_file* or starts
    (and owns) a fresh one; owned kernels are shut down in close().
    """
    def __init__(
        self,
        connection_file: str | None = None,
        *,
        timeout: float = 120.0,
    ) -> None:
        # jupyter_client is only needed for this backend, so import lazily.
        try:
            from jupyter_client import BlockingKernelClient, KernelManager
        except ImportError as exc: # pragma: no cover - optional dependency
            raise RuntimeError(
                "The dangerously_use_local_jupyter backend requires the jupyter_client package to be installed."
            ) from exc
        self._default_timeout = timeout
        self._owns_kernel = False
        self._client: BlockingKernelClient
        self._km: KernelManager | None = None
        if connection_file:
            # Attach to an already-running kernel described by the file.
            connection_path = Path(connection_file).expanduser()
            if not connection_path.exists():
                raise FileNotFoundError(
                    f"Cannot find Jupyter connection file at '{connection_path}'."
                )
            client = BlockingKernelClient()
            client.load_connection_file(str(connection_path))
            client.start_channels()
            # Ensure the connection is ready before executing.
            client.wait_for_ready(timeout=self._default_timeout)
            self._client = client
        else:
            # Start a private kernel that this session owns and must shut down.
            km = KernelManager()
            km.start_kernel()
            client = km.blocking_client()
            client.start_channels()
            client.wait_for_ready(timeout=self._default_timeout)
            self._client = client
            self._km = km
            self._owns_kernel = True
    def execute(self, code: str, *, timeout: float | None = None) -> str:
        """Execute code in the kernel, returning combined stdout/stderr output."""
        client = self._client
        effective_timeout = timeout or self._default_timeout
        msg_id = client.execute(
            code,
            store_history=True,
            allow_stdin=False,
            stop_on_error=False,
        )
        stdout_parts: list[str] = []
        stderr_parts: list[str] = []
        # Drain the IOPub channel until the kernel reports idle, keeping only
        # messages that belong to this execution (matched via parent msg_id).
        while True:
            try:
                msg = client.get_iopub_msg(timeout=effective_timeout)
            except queue.Empty as exc:
                raise TimeoutError("Timed out waiting for Jupyter kernel output.") from exc
            if msg.get("parent_header", {}).get("msg_id") != msg_id:
                continue
            msg_type = msg.get("msg_type")
            content = msg.get("content", {})
            if msg_type == "stream":
                text = content.get("text", "")
                if content.get("name") == "stdout":
                    stdout_parts.append(text)
                else:
                    stderr_parts.append(text)
            elif msg_type == "error":
                traceback_data = content.get("traceback")
                if traceback_data:
                    stderr_parts.append("\n".join(traceback_data))
                else:
                    ename = content.get("ename", "")
                    evalue = content.get("evalue", "")
                    stderr_parts.append(f"{ename}: {evalue}".strip())
            elif msg_type in {"execute_result", "display_data"}:
                data = content.get("data", {})
                text = data.get("text/plain")
                if text:
                    stdout_parts.append(text if text.endswith("\n") else f"{text}\n")
            elif msg_type == "status" and content.get("execution_state") == "idle":
                break
        # Drain the shell channel to capture final execution status.
        while True:
            try:
                reply = client.get_shell_msg(timeout=effective_timeout)
            except queue.Empty as exc:
                raise TimeoutError(
                    "Timed out waiting for Jupyter kernel execution reply."
                ) from exc
            if reply.get("parent_header", {}).get("msg_id") != msg_id:
                continue
            reply_content = reply.get("content", {})
            if reply_content.get("status") == "error":
                traceback_data = reply_content.get("traceback")
                if traceback_data:
                    stderr_parts.append("\n".join(traceback_data))
                else:
                    ename = reply_content.get("ename", "")
                    evalue = reply_content.get("evalue", "")
                    stderr_parts.append(f"{ename}: {evalue}".strip())
            break
        stdout = "".join(stdout_parts)
        stderr = "".join(stderr_parts)
        # Fold stderr after stdout so callers receive a single transcript.
        if stderr:
            if stdout:
                stdout = f"{stdout.rstrip()}\n{stderr}"
            else:
                stdout = stderr
        if not stdout.strip():
            stdout = (
                "[WARN] No output available. Use print() to output anything to stdout to "
                "receive the output"
            )
        return stdout
    def close(self) -> None:
        # Best-effort teardown: never raise during cleanup.
        with contextlib.suppress(Exception):
            self._client.stop_channels()
        if self._owns_kernel and self._km is not None:
            with contextlib.suppress(Exception):
                self._km.shutdown_kernel(now=True)
    def __del__(self) -> None: # pragma: no cover - best-effort cleanup
        self.close()
class PythonTool(Tool):
    """Tool that executes Python code via docker, uv, or a local Jupyter kernel."""
    def __init__(
        self,
        name: str = "python",
        *,
        execution_backend: str | None = None,
        local_jupyter_connection_file: str | None = None,
        local_jupyter_timeout: float = 60.0,
    ):
        assert name == "python"
        backend = execution_backend or PYTHON_EXECUTION_BACKEND
        if backend not in VALID_EXECUTION_BACKENDS:
            raise ValueError(
                "execution_backend must be one of: "
                + ", ".join(sorted(VALID_EXECUTION_BACKENDS))
            )
        self._execution_backend = backend
        self._local_jupyter_connection_file = (
            local_jupyter_connection_file
            or os.environ.get("PYTHON_LOCAL_JUPYTER_CONNECTION_FILE")
        )
        self._local_jupyter_timeout = local_jupyter_timeout
        self._jupyter_session: LocalJupyterSession | None = None
        self._execution_lock: asyncio.Lock | None = None
        # The Jupyter session is stateful, so access is serialised by a lock.
        if self._execution_backend == "dangerously_use_local_jupyter":
            self._execution_lock = asyncio.Lock()
            self._jupyter_session = LocalJupyterSession(
                connection_file=self._local_jupyter_connection_file,
                timeout=self._local_jupyter_timeout,
            )
    @classmethod
    def get_tool_name(cls) -> str:
        return "python"
    @property
    def name(self) -> str:
        return self.get_tool_name()
    @property
    def instruction(self) -> str:
        # Prompts differ because the Jupyter backend is stateful while the
        # docker/uv backends are stateless.
        if self._execution_backend == "dangerously_use_local_jupyter":
            return """
Use this tool to execute Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 120.0 seconds. Internet access for this session is UNKNOWN. Depends on the cluster.
""".strip()
        return """
Use this tool to execute STATELESS Python code in your chain of thought. The code will not be shown to the user. This tool should be used for internal reasoning, but not for code that is intended to be visible to the user (e.g. when creating plots, tables, or files).
When you send a message containing python code to python, it will be executed in a stateless docker container, and the stdout of that process will be returned to you. You have to use print statements to access the output.
IMPORTANT: Your python environment is not shared between calls. You will have to pass your entire code each time.
""".strip()
    @property
    def tool_config(self) -> ToolNamespaceConfig:
        return ToolNamespaceConfig(
            name=self.get_tool_name(), description=self.instruction, tools=[]
        )
    def _make_response(
        self,
        output: str,
        channel: str | None = None,
    ) -> Message:
        # Wrap raw output text in a tool message addressed to the assistant.
        content = TextContent(text=output)
        return self.make_response(content=content, channel=channel)
    def make_response(
        self,
        content: Content,
        *,
        metadata: dict[str, Any] | None = None,
        author: Author | None = None,
        channel: str | None = None,
    ) -> Message:
        tool_name = self.get_tool_name()
        # NOTE(review): the metadata/author parameters are ignored — author is
        # always rebuilt from the tool name; confirm whether that is intended.
        author = Author(role=Role.TOOL, name=f"{tool_name}")
        message = Message(
            author=author,
            content=[content],
        ).with_recipient("assistant")
        if channel:
            message = message.with_channel(channel)
        return message
    async def _process(self, message: Message) -> AsyncIterator[Message]:
        # Dispatch the script to the configured backend and yield one reply.
        script = message.content[0].text
        channel = message.channel
        if self._execution_backend == "docker":
            output = call_python_script(script)
        elif self._execution_backend == "dangerously_use_uv":
            output = call_python_script_with_uv(script)
        elif self._execution_backend == "dangerously_use_local_jupyter":
            assert self._jupyter_session is not None
            lock = self._execution_lock
            if lock is not None:
                async with lock:
                    try:
                        output = self._jupyter_session.execute(script)
                    except TimeoutError as exc:
                        output = f"[ERROR] {exc}"
            else:
                try:
                    output = self._jupyter_session.execute(script)
                except TimeoutError as exc:
                    output = f"[ERROR] {exc}"
        else:
            raise ValueError(
                f"Invalid PYTHON_EXECUTION_BACKEND: {self._execution_backend}"
            )
        yield self._make_response(output, channel=channel)
    def close(self) -> None:
        # Only the Jupyter backend holds state worth releasing.
        if self._jupyter_session is not None:
            self._jupyter_session.close()
    def __del__(self) -> None: # pragma: no cover - best-effort cleanup
        self.close()
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tools/python_docker/docker_tool.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/tools/simple_browser/backend.py | """
Simple backend for the simple browser tool.
"""
import functools
import asyncio
import logging
import os
from abc import abstractmethod
from importlib.metadata import version
from typing import Callable, ParamSpec, TypeVar
from urllib.parse import quote
import chz
from aiohttp import ClientSession, ClientTimeout
from tenacity import (
after_log,
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from .page_contents import (
Extract,
FetchResult,
PageContents,
get_domain,
process_html,
)
logger = logging.getLogger(__name__)
# URLs carrying this prefix are fetched as raw page source.
VIEW_SOURCE_PREFIX = "view-source:"
# Package version reported in the user-agent header of API requests;
# falls back to a pinned string when gpt-oss is not installed as a package.
try:
    _GPT_OSS_VERSION = version("gpt-oss")
except Exception:
    _GPT_OSS_VERSION = "0.0.8" # fallback version
class BackendError(Exception):
    """Raised when a backend API call fails (missing key, non-200 status)."""
    pass
# Type variables used to preserve the wrapped function's signature in with_retries.
P = ParamSpec("P")
R = TypeVar("R")
def with_retries(
    func: Callable[P, R],
    num_retries: int,
    max_wait_time: float,
) -> Callable[P, R]:
    """Wrap *func* with tenacity retries on any exception.

    Uses exponential backoff (min 2s, capped at *max_wait_time* seconds) and
    stops after *num_retries* attempts.  A non-positive *num_retries* returns
    *func* unchanged.
    """
    if num_retries <= 0:
        return func
    decorate = retry(
        stop=stop_after_attempt(num_retries),
        wait=wait_exponential(
            multiplier=1,
            min=2,
            max=max_wait_time,
        ),
        before_sleep=before_sleep_log(logger, logging.INFO),
        after=after_log(logger, logging.INFO),
        retry=retry_if_exception_type(Exception),
    )
    return decorate(func)
def maybe_truncate(text: str, num_chars: int = 1024) -> str:
    """Truncate *text* to at most *num_chars* characters.

    Text over the limit is cut and suffixed with "..." (the ellipsis counts
    toward the budget).  Text at or under the limit is returned unchanged.
    """
    if len(text) > num_chars:
        # Guard small budgets: for num_chars < 3 the original index
        # (num_chars - 3) went negative and sliced from the END of the
        # string, returning more than num_chars characters.
        text = text[: max(num_chars - 3, 0)] + "..."
    return text
@chz.chz(typecheck=True)
class Backend:
    """Abstract search/fetch backend.
    Subclasses must define ``BASE_URL`` and ``_get_api_key()`` (both are used
    by the shared ``_post``/``_get`` helpers) and implement search/fetch.
    """
    source: str = chz.field(doc="Description of the backend source")
    @abstractmethod
    async def search(
        self,
        query: str,
        topn: int,
        session: ClientSession,
    ) -> PageContents:
        pass
    @abstractmethod
    async def fetch(self, url: str, session: ClientSession) -> PageContents:
        pass
    async def _post(self, session: ClientSession, endpoint: str, payload: dict) -> dict:
        """POST *payload* to BASE_URL + endpoint; raise BackendError on non-200."""
        headers = {
            "x-api-key": self._get_api_key(),
            "user-agent": f"gpt-oss/{_GPT_OSS_VERSION}",
        }
        async with session.post(f"{self.BASE_URL}{endpoint}", json=payload, headers=headers) as resp:
            if resp.status != 200:
                raise BackendError(
                    f"{self.__class__.__name__} error {resp.status}: {await resp.text()}"
                )
            return await resp.json()
    async def _get(self, session: ClientSession, endpoint: str, params: dict) -> dict:
        """GET BASE_URL + endpoint with *params*; raise BackendError on non-200."""
        headers = {
            "x-api-key": self._get_api_key(),
            "user-agent": f"gpt-oss/{_GPT_OSS_VERSION}",
        }
        async with session.get(f"{self.BASE_URL}{endpoint}", params=params, headers=headers) as resp:
            if resp.status != 200:
                raise BackendError(
                    f"{self.__class__.__name__} error {resp.status}: {await resp.text()}"
                )
            return await resp.json()
@chz.chz(typecheck=True)
class ExaBackend(Backend):
    """Backend that uses the Exa Search API."""
    source: str = chz.field(doc="Description of the backend source")
    api_key: str | None = chz.field(
        doc="Exa API key. Uses EXA_API_KEY environment variable if not provided.",
        default=None,
    )
    BASE_URL: str = "https://api.exa.ai"
    def _get_api_key(self) -> str:
        # An explicitly configured key wins over the environment variable.
        key = self.api_key or os.environ.get("EXA_API_KEY")
        if not key:
            raise BackendError("Exa API key not provided")
        return key
    async def search(
        self, query: str, topn: int, session: ClientSession
    ) -> PageContents:
        """Run a search and re-render the hits as a small HTML results page."""
        data = await self._post(
            session,
            "/search",
            {"query": query, "numResults": topn, "contents": {"text": True, "summary": True}},
        )
        # make a simple HTML page to work with browser format
        titles_and_urls = [
            (result["title"], result["url"], result["summary"])
            for result in data["results"]
        ]
        # NOTE(review): titles/summaries are interpolated into HTML unescaped;
        # confirm process_html tolerates markup inside these fields.
        html_page = f"""
        <html><body>
        <h1>Search Results</h1>
        <ul>
        {"".join([f"<li><a href='{url}'>{title}</a> {summary}</li>" for title, url, summary in titles_and_urls])}
        </ul>
        </body></html>
        """
        return process_html(
            html=html_page,
            url="",
            title=query,
            display_urls=True,
            session=session,
        )
    async def fetch(self, url: str, session: ClientSession) -> PageContents:
        """Fetch one page's contents (with HTML tags) via the /contents endpoint."""
        is_view_source = url.startswith(VIEW_SOURCE_PREFIX)
        if is_view_source:
            url = url[len(VIEW_SOURCE_PREFIX) :]
        data = await self._post(
            session,
            "/contents",
            {"urls": [url], "text": { "includeHtmlTags": True }},
        )
        results = data.get("results", [])
        if not results:
            raise BackendError(f"No contents returned for {url}")
        return process_html(
            html=results[0].get("text", ""),
            url=url,
            title=results[0].get("title", ""),
            display_urls=True,
            session=session,
        )
@chz.chz(typecheck=True)
class YouComBackend(Backend):
    """Backend that uses the You.com Search API."""
    source: str = chz.field(doc="Description of the backend source")
    BASE_URL: str = "https://api.ydc-index.io"
    def _get_api_key(self) -> str:
        key = os.environ.get("YDC_API_KEY")
        if not key:
            raise BackendError("You.com API key not provided")
        return key
    async def search(
        self, query: str, topn: int, session: ClientSession
    ) -> PageContents:
        """Run a search and re-render web + news hits as a small HTML page."""
        data = await self._get(
            session,
            "/v1/search",
            {"query": query, "count": topn},
        )
        # make a simple HTML page to work with browser format
        web_titles_and_urls, news_titles_and_urls = [], []
        if "web" in data["results"]:
            web_titles_and_urls = [
                (result["title"], result["url"], result["snippets"])
                for result in data["results"]["web"]
            ]
        if "news" in data["results"]:
            news_titles_and_urls = [
                (result["title"], result["url"], result["description"])
                for result in data["results"]["news"]
            ]
        titles_and_urls = web_titles_and_urls + news_titles_and_urls
        # NOTE(review): titles/snippets are interpolated into HTML unescaped;
        # confirm process_html tolerates markup inside these fields.
        html_page = f"""
        <html><body>
        <h1>Search Results</h1>
        <ul>
        {"".join([f"<li><a href='{url}'>{title}</a> {summary}</li>" for title, url, summary in titles_and_urls])}
        </ul>
        </body></html>
        """
        return process_html(
            html=html_page,
            url="",
            title=query,
            display_urls=True,
            session=session,
        )
    async def fetch(self, url: str, session: ClientSession) -> PageContents:
        """Fetch one page's raw HTML via the /v1/contents endpoint."""
        is_view_source = url.startswith(VIEW_SOURCE_PREFIX)
        if is_view_source:
            url = url[len(VIEW_SOURCE_PREFIX) :]
        data = await self._post(
            session,
            "/v1/contents",
            {"urls": [url], "livecrawl_formats": "html"},
        )
        if not data:
            raise BackendError(f"No contents returned for {url}")
        if "html" not in data[0]:
            raise BackendError(f"No HTML returned for {url}")
        return process_html(
            html=data[0].get("html", ""),
            url=url,
            title=data[0].get("title", ""),
            display_urls=True,
            session=session,
        )
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tools/simple_browser/backend.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/tools/simple_browser/page_contents.py | """
Page contents for the simple browser tool.
"""
from __future__ import annotations
import dataclasses
import functools
import logging
import re
from urllib.parse import urljoin, urlparse
import aiohttp
import html2text
import lxml
import lxml.etree
import lxml.html
import pydantic
import tiktoken
logger = logging.getLogger(__name__)
# <sup>/<sub> elements whose content is a single word-like token
# (rewritten to ^{...}/_{...} before html2text conversion).
HTML_SUP_RE = re.compile(r"<sup( [^>]*)?>([\w\-]+)</sup>")
HTML_SUB_RE = re.compile(r"<sub( [^>]*)?>([\w\-]+)</sub>")
# One or more tags sandwiched directly between word characters
# (e.g. adjacent table cells) — a space is inserted so words don't fuse.
HTML_TAGS_SEQ_RE = re.compile(r"(?<=\w)((<[^>]*>)+)(?=\w)")
# An anchor marker followed by whitespace (anchors get moved across the gap).
WHITESPACE_ANCHOR_RE = re.compile(r"(【\@[^】]+】)(\s+)")
# Lines consisting only of whitespace.
EMPTY_LINE_RE = re.compile(r"^\s+$", flags=re.MULTILINE)
# Runs of blank lines, collapsed to a single blank line.
EXTRA_NEWLINE_RE = re.compile(r"\n(\s*\n)+")
class Extract(pydantic.BaseModel):  # A search result snippet or a quotable extract
    url: str
    text: str
    title: str
    line_idx: int | None = None  # display line the extract starts at, when known
class FetchResult(pydantic.BaseModel):
    """Outcome of fetching a single URL: success payload or error details."""
    url: str
    success: bool
    title: str | None = None
    error_type: str | None = None
    error_message: str | None = None
    html: str | None = None
    raw_content: bytes | None = None
    plaintext: str | None = None
class PageContents(pydantic.BaseModel):
    """A processed page: display text plus link ids and optional snippets."""
    url: str
    text: str
    title: str
    urls: dict[str, str]  # link id -> absolute URL
    snippets: dict[str, Extract] | None = None  # set on search/find result pages
    error_message: str | None = None
@dataclasses.dataclass(frozen=True)
class Tokens:
    """Token ids for a text plus each token's starting character offset."""
    tokens: list[int]
    tok2idx: list[int]  # Offsets = running sum of lengths.
def get_domain(url: str) -> str:
    """Extract the network location (domain) from a URL.

    Accepts bare domains such as ``"example.com"``: when urlparse finds no
    netloc, an ``http://`` scheme is prepended so the leading component is
    treated as the host rather than a path.

    The previous check (``"http" not in url``) misfired on URLs that merely
    contained "http" somewhere (returning "") and on non-http schemes; testing
    the parsed netloc directly handles both.
    """
    if not urlparse(url).netloc:
        url = "http://" + url
    return urlparse(url).netloc
def multiple_replace(text: str, replacements: dict[str, str]) -> str:
    """Perform multiple literal string replacements in a single regex pass.

    Keys are matched as literal text (regex-escaped) and each match is
    replaced by its mapped value. An empty mapping now returns *text*
    unchanged — previously it built an empty alternation ``()`` that matched
    the empty string everywhere and crashed with ``KeyError('')``.
    """
    if not replacements:
        return text
    regex = re.compile("(%s)" % "|".join(map(re.escape, replacements.keys())))
    return regex.sub(lambda mo: replacements[mo.group(1)], text)
@functools.lru_cache(maxsize=1024)
def mark_lines(text: str) -> str:
    """Prefix every line of *text* with its zero-based line number ('L0: ...')."""
    numbered = (f"L{idx}: {content}" for idx, content in enumerate(text.split("\n")))
    return "\n".join(numbered)
@functools.cache
def _tiktoken_vocabulary_lengths(enc_name: str) -> list[int]:
    """Return the decoded character length of every token in a TikToken vocabulary."""
    enc = tiktoken.get_encoding(enc_name)
    lengths: list[int] = []
    for token_id in range(enc.n_vocab):
        lengths.append(len(enc.decode([token_id])))
    return lengths
def warmup_caches(enc_names: list[str]) -> None:
    """Precompute (and cache) the token-length tables for the given encodings."""
    for enc_name in enc_names:
        _tiktoken_vocabulary_lengths(enc_name)
def _replace_special_chars(text: str) -> str:
    """Swap characters reserved for citation markup with look-alike glyphs.

    【 and 】 are reserved for link/citation anchors, so page text must not
    contain them; zero-width spaces are dropped outright.
    """
    substitutions = {
        "【": "〖",
        "】": "〗",
        "◼": "◾",
        # "━": "─",
        "\u200b": "",  # zero width space
        # Note: not replacing †
    }
    return multiple_replace(text, substitutions)
def merge_whitespace(text: str) -> str:
    """Collapse newlines and runs of whitespace into single spaces."""
    flattened = text.replace("\n", " ")
    return re.sub(r"\s+", " ", flattened)
def arxiv_to_ar5iv(url: str) -> str:
    """Convert an arxiv.org URL to its ar5iv.org (HTML rendering) equivalent.

    The dot is now escaped: the previous pattern ``arxiv.org`` let ``.``
    match any character, so unrelated strings like ``arxivXorg`` were also
    rewritten.
    """
    return re.sub(r"arxiv\.org", "ar5iv.org", url)
def _clean_links(root: lxml.html.HtmlElement, cur_url: str) -> dict[str, str]:
    """Processes all anchor tags in the HTML, replaces them with a custom format and returns an ID-to-URL mapping."""
    cur_domain = get_domain(cur_url)
    urls: dict[str, str] = {}      # link id -> absolute URL
    urls_rev: dict[str, str] = {}  # absolute URL -> link id (dedupes repeated links)
    for a in root.findall(".//a[@href]"):
        assert a.getparent() is not None
        link = a.attrib["href"]
        if link.startswith(("mailto:", "javascript:")):
            continue
        # † inside link text would collide with the marker syntax used below.
        text = _get_text(a).replace("†", "‡")
        if not re.sub(r"【\@([^】]+)】", "", text):  # Probably an image
            continue
        if link.startswith("#"):
            # Same-page fragment: keep the visible text, drop the link.
            replace_node_with_text(a, text)
            continue
        try:
            link = urljoin(cur_url, link)  # works with both absolute and relative links
            domain = get_domain(link)
        except Exception:
            domain = ""
        if not domain:
            logger.debug("SKIPPING LINK WITH URL %s", link)
            continue
        link = arxiv_to_ar5iv(link)
        if (link_id := urls_rev.get(link)) is None:
            link_id = f"{len(urls)}"
            urls[link_id] = link
            urls_rev[link] = link_id
        # Cross-domain links carry the target domain after a second †.
        if domain == cur_domain:
            replacement = f"【{link_id}†{text}】"
        else:
            replacement = f"【{link_id}†{text}†{domain}】"
        replace_node_with_text(a, replacement)
    return urls
def _get_text(node: lxml.html.HtmlElement) -> str:
    """Return all text inside *node*, whitespace-normalized onto one line."""
    joined = " ".join(node.itertext())
    return merge_whitespace(joined)
def _remove_node(node: lxml.html.HtmlElement) -> None:
    """Detach *node* from its parent in the lxml tree."""
    parent = node.getparent()
    parent.remove(node)
def _escape_md(text: str) -> str:
return text
def _escape_md_section(text: str, snob: bool = False) -> str:
return text
def html_to_text(html: str) -> str:
    """Convert an HTML string to clean plaintext via html2text.

    Single-token superscripts/subscripts are rewritten to ^{...}/_{...}
    first, and a space is inserted between tags sandwiched directly between
    word characters (e.g. adjacent table cells) so words don't fuse.

    html2text's markdown escaping is monkey-patched out for the duration of
    the call — we want plain text, not escaped markdown. The patch is now
    restored in a ``finally`` block, so an exception inside html2text can no
    longer leave the module permanently patched.
    """
    html = re.sub(HTML_SUP_RE, r"^{\2}", html)
    html = re.sub(HTML_SUB_RE, r"_{\2}", html)
    # add spaces between tags such as table cells
    html = re.sub(HTML_TAGS_SEQ_RE, r" \1", html)
    # we don't need to escape markdown, so monkey-patch the logic
    orig_escape_md = html2text.utils.escape_md
    orig_escape_md_section = html2text.utils.escape_md_section
    html2text.utils.escape_md = _escape_md
    html2text.utils.escape_md_section = _escape_md_section
    try:
        h = html2text.HTML2Text()
        h.ignore_links = True
        h.ignore_images = True
        h.body_width = 0  # no wrapping
        h.ignore_tables = True
        h.unicode_snob = True
        h.ignore_emphasis = True
        return h.handle(html).strip()
    finally:
        html2text.utils.escape_md = orig_escape_md
        html2text.utils.escape_md_section = orig_escape_md_section
def _remove_math(root: lxml.html.HtmlElement) -> None:
    """Strip every <math> element from the tree (MathML is unreadable as text)."""
    for math_node in root.findall(".//math"):
        _remove_node(math_node)
def remove_unicode_smp(text: str) -> str:
    """Strip Supplemental Multilingual Plane characters from *text*.

    SMP code points (U+10000-U+1FFFF) are not supported by lxml.html
    processing, so they are removed before parsing.
    """
    return re.sub(r"[\U00010000-\U0001FFFF]", "", text)
def replace_node_with_text(node: lxml.html.HtmlElement, text: str) -> None:
    """Swap *node* for plain *text* while preserving surrounding text.

    The replacement text (plus the node's tail) is appended either to the
    previous sibling's tail or, when the node is first, to the parent's text.
    """
    parent = node.getparent()
    sibling = node.getprevious()
    combined = text + (node.tail or "")
    if sibling is None:
        parent.text = (parent.text or "") + combined
    else:
        sibling.tail = (sibling.tail or "") + combined
    parent.remove(node)
def replace_images(
    root: lxml.html.HtmlElement,
    base_url: str,
    session: aiohttp.ClientSession | None,
) -> None:
    """Replace every <img> tag with a numbered text placeholder.

    The image's alt (or, failing that, title) attribute is included when
    present. *base_url* and *session* are currently unused but kept for
    interface compatibility.
    """
    for index, img in enumerate(root.findall(".//img")):
        label = img.get("alt", img.get("title"))
        if label:
            placeholder = f"[Image {index}: {label}]"
        else:
            placeholder = f"[Image {index}]"
        replace_node_with_text(img, placeholder)
def process_html(
    html: str,
    url: str,
    title: str | None,
    session: aiohttp.ClientSession | None = None,
    display_urls: bool = False,
) -> PageContents:
    """Convert HTML into model-readable version."""
    # Order matters: strip unsupported SMP characters before lxml parses the
    # document, and neutralize reserved marker characters (【】) before link
    # processing introduces real markers.
    html = remove_unicode_smp(html)
    html = _replace_special_chars(html)
    root = lxml.html.fromstring(html)
    # Parse the title: explicit argument wins, then <title>, then the domain.
    title_element = root.find(".//title")
    if title:
        final_title = title
    elif title_element is not None:
        final_title = title_element.text or ""
    elif url and (domain := get_domain(url)):
        final_title = domain
    else:
        final_title = ""
    urls = _clean_links(root, url)
    replace_images(
        root=root,
        base_url=url,
        session=session,
    )
    _remove_math(root)
    clean_html = lxml.etree.tostring(root, encoding="UTF-8").decode()
    text = html_to_text(clean_html)
    text = re.sub(WHITESPACE_ANCHOR_RE, lambda m: m.group(2) + m.group(1), text)
    # ^^^ move anchors to the right thru whitespace
    # This way anchors don't create extra whitespace
    text = re.sub(EMPTY_LINE_RE, "", text)
    # ^^^ Get rid of empty lines
    text = re.sub(EXTRA_NEWLINE_RE, "\n\n", text)
    # ^^^ Get rid of extra newlines
    top_parts = []
    if display_urls:
        top_parts.append(f"\nURL: {url}\n")
    # NOTE: Publication date is currently not extracted due
    # to performance costs.
    return PageContents(
        url=url,
        text="".join(top_parts) + text,
        urls=urls,
        title=final_title,
    )
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tools/simple_browser/page_contents.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/tools/simple_browser/simple_browser_tool.py | import contextvars
import dataclasses
import functools
import itertools
import json
import re
import textwrap
from typing import Any, AsyncIterator, Callable, ParamSpec, Sequence
from urllib.parse import quote, unquote
import pydantic
import structlog
import tiktoken
from aiohttp import ClientSession
from openai_harmony import (
Author,
Content,
Message,
Role,
TextContent,
ToolNamespaceConfig
)
from ..tool import Tool
# from functions import Function, from_python
from .backend import (
VIEW_SOURCE_PREFIX,
Backend,
BackendError,
maybe_truncate,
)
from .page_contents import Extract, PageContents
logger = structlog.stdlib.get_logger(component=__name__)
# TODO(zhuohan): Use the correct encoding at release
ENC_NAME = "o200k_base"
# Header format for each match shown on a `find` results page.
FIND_PAGE_LINK_FORMAT = "# 【{idx}†{title}】"
# A citation fragment cut off at the start of a string.
PARTIAL_INITIAL_LINK_PATTERN = re.compile(r"^[^【】]*】")
# An unterminated citation at the end of a string (still being generated).
PARTIAL_FINAL_LINK_PATTERN = re.compile(
    r"【\d*(?:†(?P<content>[^†】]*)(?:†[^†】]*)?)?$"
)
# A complete inline link marker: 【id†text】 or 【id†text†domain】.
LINK_PATTERN = re.compile(r"【\d+†(?P<content>[^†】]+)(?:†[^†】]+)?】")
# A complete citation in model output: 【cursor†content(†domain)?】.
CITATION_OUTPUT_PATTERN = re.compile(r"【(?P<cursor>\d+)†(?P<content>[^†】]+)(?:†[^†】]+)?】")

CallParams = ParamSpec("CallParams")
_P = ParamSpec("_P")

# Name of the tool function currently executing, used to attribute responses.
_live_function_name = contextvars.ContextVar[str]("_live_function_name")
class ToolUsageError(Exception):
    """Raised when the model calls the browser tool with invalid arguments."""
    pass
def function_the_model_can_call(
    fn: Callable[_P, AsyncIterator[Message]],
) -> Callable[_P, AsyncIterator[Message]]:
    """Mark an async generator method as directly callable by the model.

    While the wrapped generator runs, its name is stored in the
    `_live_function_name` context variable so that `make_response` can
    attribute produced messages to `<tool_name>.<function_name>`.
    """
    fn.__fn_calling_tool_fn_type__ = "function_the_model_can_call"  # type: ignore

    @functools.wraps(fn)
    async def inner(*args: _P.args, **kwargs: _P.kwargs) -> AsyncIterator[Message]:
        token = _live_function_name.set(fn.__name__)
        try:
            async for m in fn(*args, **kwargs):
                yield m
        finally:
            # Always restore the previous value, even if the generator is
            # closed early or raises.
            _live_function_name.reset(token)

    return inner
@functools.cache
def _tiktoken_vocabulary_lengths(enc_name: str) -> list[int]:
    """Decoded character length of every token id; 1 for ids that fail to decode."""
    encoding = tiktoken.get_encoding(enc_name)
    lengths: list[int] = []
    for token_id in range(encoding.n_vocab):
        try:
            lengths.append(len(encoding.decode([token_id])))
        except Exception:
            # Some ids (e.g. special tokens) may not decode; assume width 1.
            lengths.append(1)
    return lengths
@dataclasses.dataclass(frozen=True)
class Tokens:
    """Token ids for a text plus each token's starting character offset."""
    tokens: list[int]
    tok2idx: list[int]  # Offsets = running sum of lengths.
@functools.cache
def max_chars_per_token(enc_name: str) -> int:
    """Typical value is 128, but let's be safe."""
    return max(_tiktoken_vocabulary_lengths(enc_name))
def get_tokens(text: str, enc_name: str) -> Tokens:
    """Tokenize *text* and compute each token's starting character offset."""
    encoding = tiktoken.get_encoding(enc_name)
    tokens = encoding.encode(text, disallowed_special=())
    _vocabulary_lengths = _tiktoken_vocabulary_lengths(enc_name)
    # Offset of token i = sum of the decoded lengths of tokens 0..i-1.
    tok2idx = [0] + list(itertools.accumulate(_vocabulary_lengths[i] for i in tokens))[
        :-1
    ]
    result = Tokens(tokens=tokens, tok2idx=tok2idx)
    return result
def get_end_loc(
    loc: int,
    num_lines: int,
    total_lines: int,
    lines: list[str],
    view_tokens: int,
    encoding_name: str,
) -> int:
    """Return the exclusive end line index for a page view starting at *loc*.

    When *num_lines* is unset (<= 0), show as many lines as fit within
    *view_tokens* tokens of the numbered display text; otherwise honor the
    caller's count. The result is clamped to *total_lines*.
    """
    if num_lines <= 0:
        # COMPUTE NUMBER OF LINES TO SHOW
        txt = join_lines(lines[loc:], add_line_numbers=True, offset=loc)
        # if the text is very short, no need to truncate at all
        # at least one char per token
        if len(txt) > view_tokens:
            # limit the amount of text we tokenize here
            upper_bound = max_chars_per_token(encoding_name)
            tok2idx = get_tokens(
                txt[: (view_tokens + 1) * upper_bound], encoding_name
            ).tok2idx
            if len(tok2idx) > view_tokens:
                # Character offset of the first token past the budget.
                end_idx = tok2idx[view_tokens]
                num_lines = txt[:end_idx].count("\n") + 1  # round up
            else:
                num_lines = total_lines
        else:
            num_lines = total_lines
    return min(loc + num_lines, total_lines)
def get_page_metadata(
    curr_page: PageContents,
) -> dict[str, str | None | dict[str, str] | list[str]]:
    """Some attributes of the current page."""
    metadata: dict[str, str | None | dict[str, str] | list[str]] = {}
    metadata["url"] = curr_page.url
    metadata["title"] = curr_page.title
    return metadata
def join_lines(
    lines: list[str], add_line_numbers: bool = False, offset: int = 0
) -> str:
    """Join lines, optionally prefixing each with its (offset-based) line number."""
    if not add_line_numbers:
        return "\n".join(lines)
    numbered = [f"L{offset + n}: {line}" for n, line in enumerate(lines)]
    return "\n".join(numbered)
def wrap_lines(text: str, width: int = 80) -> list[str]:
    """Word-wrap each line of *text* to *width* columns, preserving empty lines."""
    wrapped: list[str] = []
    for line in text.split("\n"):
        if line:
            wrapped.extend(
                textwrap.wrap(
                    line, width=width, replace_whitespace=False, drop_whitespace=False
                )
            )
        else:
            # textwrap.wrap("") returns [], which would drop the blank line.
            wrapped.append("")
    return wrapped
def strip_links(text: str) -> str:
    """Remove citation/link markers from *text*, keeping their visible content."""
    # Leading fragment of a link whose opening bracket was cut off.
    text = PARTIAL_INITIAL_LINK_PATTERN.sub("", text)
    # An unterminated trailing link keeps only its content portion.
    text = PARTIAL_FINAL_LINK_PATTERN.sub(lambda m: m.group("content"), text)
    # Complete links are reduced to their content.
    text = LINK_PATTERN.sub(lambda m: m.group("content"), text)
    return text
def maybe_get_function_args(
    message: Message, tool_name: str = "browser"
) -> dict[str, Any] | None:
    """Parse a tool-call message's JSON body into a kwargs dict.

    Returns None when the message is not addressed to *tool_name* or its body
    is not a JSON object; returns {} for an empty body.
    """
    if not message.recipient.startswith(f"{tool_name}."):
        return None
    body = ""
    if len(message.content) == 1 and isinstance(message.content[0], TextContent):
        body = message.content[0].text
    if not body:
        return {}
    try:
        parsed = json.loads(body)
    except json.JSONDecodeError:
        return None
    return parsed if isinstance(parsed, dict) else None
async def run_find_in_page(
    pattern: str,
    page: PageContents,
    max_results: int = 50,
    num_show_lines: int = 4,
) -> PageContents:
    """Substring-search a page and render the hits as a new result page.

    *pattern* is matched against each lowercased display line (the caller
    lowercases the pattern). After a hit, the scan jumps past the snippet,
    so additional matches inside the same snippet window are not reported
    separately.
    """
    # Wrap to display width first so reported line numbers match show_page.
    lines = wrap_lines(text=page.text)
    txt = join_lines(lines, add_line_numbers=False)
    without_links = strip_links(txt)
    lines = without_links.split("\n")
    result_chunks, snippets = [], []
    line_idx, match_idx = 0, 0
    while line_idx < len(lines):
        line = lines[line_idx]
        if pattern not in line.lower():
            line_idx += 1
            continue
        snippet = "\n".join(lines[line_idx : line_idx + num_show_lines])
        link_title = FIND_PAGE_LINK_FORMAT.format(
            idx=f"{match_idx}", title=f"match at L{line_idx}"
        )
        result_chunks.append(f"{link_title}\n{snippet}")
        snippets.append(
            Extract(
                url=page.url, text=snippet, title=f"#{match_idx}", line_idx=line_idx
            )
        )
        if len(result_chunks) == max_results:
            break
        match_idx += 1
        line_idx += num_show_lines
    urls = [page.url for _ in result_chunks]
    if result_chunks:
        display_text = "\n\n".join(result_chunks)
    else:
        display_text = f"No `find` results for pattern: `{pattern}`"
    result_page = PageContents(
        url=f"{page.url}/find?pattern={quote(pattern)}",
        title=f"Find results for text: `{pattern}` in `{page.title}`",
        text=display_text,
        urls={str(i): url for i, url in enumerate(urls)},
        snippets={str(i): snip for i, snip in enumerate(snippets)},
    )
    return result_page
def handle_errors(
    func: Callable[CallParams, AsyncIterator["Message"]],
) -> Callable[CallParams, AsyncIterator["Message"]]:
    """Decorator: convert expected tool errors into an error response message.

    Only ToolUsageError and BackendError are caught; anything else is a
    genuine bug and propagates.
    """
    @functools.wraps(func)
    async def inner(
        *args: CallParams.args, **kwargs: CallParams.kwargs
    ) -> AsyncIterator[Message]:
        tool = args[0]
        # Could be cool to type this explicitly, but mypy makes it hard
        assert isinstance(tool, SimpleBrowserTool)
        try:
            async for msg in func(*args, **kwargs):
                yield msg
        except (ToolUsageError, BackendError) as e:
            yield tool.make_error_message(e)

    return inner
class SimpleBrowserState(pydantic.BaseModel):
    """Browsing history: every visited page plus the visit order."""

    # maps page url to page contents
    pages: dict[str, PageContents] = pydantic.Field(default_factory=dict)
    # a sequential list of page urls
    page_stack: list[str] = pydantic.Field(default_factory=list)

    @property
    def current_cursor(self) -> int:
        # Index of the most recent page; -1 when nothing has been visited yet.
        return len(self.page_stack) - 1

    def add_page(self, page: PageContents) -> None:
        """Record *page* and make it the current page."""
        self.pages[page.url] = page
        self.page_stack.append(page.url)

    def get_page(self, cursor: int = -1) -> PageContents:
        """Return the page at *cursor* (-1 = current), with model-friendly errors."""
        if self.current_cursor < 0:
            raise ToolUsageError("No pages to access!")
        if cursor == -1 or cursor == self.current_cursor:
            return self.pages[self.page_stack[-1]]
        try:
            page_url = self.page_stack[cursor]
        except TypeError as e:
            raise ToolUsageError(
                f"`cursor` should be an integer, not `{type(cursor).__name__}`"
            ) from e
        except IndexError as e:
            raise ToolUsageError(
                f"Cursor `{cursor}` is out of range. "
                f"Available cursor indices: [0 - {self.current_cursor}]."
            ) from e
        return self.pages[page_url]

    def get_page_by_url(self, url: str) -> PageContents | None:
        """Return the cached page for *url*, or None if never visited."""
        if url in self.pages:
            return self.pages[url]
        return None

    def pop_page_stack(self) -> None:
        """Drop the most recent entry from the visit order (pages dict keeps it)."""
        assert self.current_cursor >= 0, "No page to pop!"
        self.page_stack.pop()
class SimpleBrowserTool(Tool):
    """Stateful browsing tool exposing `search`, `open` and `find` to the model.

    Visited pages accumulate on a stack; the model refers to them by cursor
    index and cites lines via 【cursor†L...】 markers.
    """

    def __init__(
        self,
        backend: Backend,
        encoding_name: str = ENC_NAME,
        max_search_results: int = 20,
        tool_state: dict[str, Any] | None = None,
        view_tokens: int = 1024,
        name: str = "browser",
    ):
        assert name == "browser"
        self.backend = backend
        if tool_state is None:
            self.tool_state = SimpleBrowserState()
        else:
            # Resume a previously serialized browsing session.
            self.tool_state = SimpleBrowserState.model_validate(tool_state)
        self.encoding_name = encoding_name
        self.max_search_results = max_search_results
        # Token budget for a single page view.
        self.view_tokens = view_tokens

    def get_tool_state(self) -> dict[str, Any]:
        """Serialize browsing state so a conversation can be resumed later."""
        return {"tool_state": self.tool_state.model_dump()}

    @classmethod
    def get_tool_name(cls) -> str:
        return "browser"

    @property
    def name(self) -> str:
        return self.get_tool_name()

    @property
    def tool_config(self) -> ToolNamespaceConfig:
        """Harmony tool config with citation instructions and backend sources."""
        config = ToolNamespaceConfig.browser()
        config.name = self.name
        config.description = """Tool for browsing.
The `cursor` appears in brackets before each browsing display: `[{cursor}]`.
Cite information from the tool using the following format:
`【{cursor}†L{line_start}(-L{line_end})?】`, for example: `【6†L9-L11】` or `【8†L3】`.
Do not quote more than 10 words directly from the tool output.
sources=""" + self.backend.source
        return config

    @property
    def instruction(self) -> str:
        return self.tool_config.description

    def _render_browsing_display(
        self,
        tether_id: int,
        result: str,
        summary: str | None = None,
    ):
        """Prefix the display text with `[cursor]` and an optional header."""
        to_return = ""
        # Always show summaries.
        if summary:
            to_return += summary
        to_return += result
        to_return = f"[{tether_id}] {to_return}"
        return to_return

    def _make_response(
        self,
        page: PageContents,
        cursor: int,
        body: str,
        scrollbar: str,
    ) -> Message:
        """Build the tool message for one rendered view of *page*."""
        domain = maybe_truncate(unquote(page.url))
        header = f"{page.title}"
        if domain:
            header += f" ({domain})"
        header += f"\n**{scrollbar}**\n\n"
        content = TextContent(text=self._render_browsing_display(cursor, body, header))
        return self.make_response(
            content=content, metadata=get_page_metadata(self.tool_state.get_page())
        )

    async def show_page(self, loc: int = 0, num_lines: int = -1) -> Message:
        """Render the current page from line *loc*, clipped to the token budget."""
        page = self.tool_state.get_page()
        cursor = self.tool_state.current_cursor
        lines = wrap_lines(text=page.text)
        total_lines = len(lines)
        if loc >= total_lines:
            err_msg = (
                f"Invalid location parameter: `{loc}`. "
                f"Cannot exceed page maximum of {total_lines - 1}."
            )
            raise ToolUsageError(err_msg)
        end_loc = get_end_loc(
            loc, num_lines, total_lines, lines, self.view_tokens, self.encoding_name
        )
        lines_to_show = lines[loc:end_loc]
        body = join_lines(lines_to_show, add_line_numbers=True, offset=loc)
        scrollbar = f"viewing lines [{loc} - {end_loc - 1}] of {total_lines - 1}"
        return self._make_response(page, cursor, body, scrollbar)

    async def show_page_safely(self, loc: int = 0, num_lines: int = -1) -> Message:
        """Like show_page, but pop the freshly pushed page if rendering fails."""
        try:
            return await self.show_page(loc=loc, num_lines=num_lines)
        except ToolUsageError as e:
            self.tool_state.pop_page_stack()
            raise e

    async def _open_url(self, url: str, direct_url_open: bool) -> PageContents:
        """Use the cache, if available."""
        backend = self.backend
        # direct_url_open should be regarded as a refresh
        if not direct_url_open and (page := self.tool_state.get_page_by_url(url)):
            assert page.url == url
            return page
        try:
            async with ClientSession() as session:
                page = await backend.fetch(url, session=session)
                return page
        except Exception as e:
            msg = maybe_truncate(str(e))
            logger.warning("Error fetching URL in lean browser tool", exc_info=e)
            raise BackendError(
                f"Error fetching URL `{maybe_truncate(url)}`: {msg}"
            ) from e

    def make_error_message(self, error: Exception) -> Message:
        """Uses the message creation codepath from the base class."""
        error_name = error.__class__.__name__  # NOTE(review): currently unused
        content = TextContent(text=str(error))
        return self.make_response(content=content)

    @function_the_model_can_call
    @handle_errors
    async def search(
        self,
        query: str,
        topn: int = 10,
        top_n: int = 10,
        source: str | None = None,
    ) -> AsyncIterator[Message]:
        """Search the backend and push the results page onto the stack.

        `topn`/`top_n`/`source` are accepted for model-call compatibility but
        ignored; `self.max_search_results` controls the result count.
        """
        del topn
        del top_n
        try:
            async with ClientSession() as session:
                search_page = await self.backend.search(
                    query=query,
                    topn=self.max_search_results,
                    session=session,
                )
        except Exception as e:
            msg = maybe_truncate(str(e))
            raise BackendError(f"Error during search for `{query}`: {msg}") from e
        self.tool_state.add_page(search_page)
        yield await self.show_page_safely(loc=0)

    @function_the_model_can_call
    @handle_errors
    async def open(
        self,
        id: int | str = -1,
        cursor: int = -1,
        loc: int = -1,
        num_lines: int = -1,
        view_source: bool = False,
        source: str | None = None,
    ) -> AsyncIterator[Message]:
        """Open a link id, a URL string, or re-view an already opened page.

        - `id` as str: open that URL directly (treated as a refresh).
        - `id` as int >= 0: follow that numbered link on the page at `cursor`.
        - `id` == -1: scroll/re-view the page at `cursor`.
        `view_source` fetches the raw source of the target URL instead.
        """
        curr_page: PageContents | None = None
        stay_on_current_page = False
        direct_url_open = False
        if isinstance(id, str):
            snippet = None
            url = id
            direct_url_open = True
        else:  # Operate on a previously opened page
            curr_page = self.tool_state.get_page(cursor)
            if id >= 0:  # click a link
                try:
                    url = curr_page.urls[str(id)]
                except KeyError as e:
                    raise ToolUsageError(f"Invalid link id `{id}`.") from e
                snippet = (curr_page.snippets or {}).get(str(id))
                if snippet and curr_page.url == "":
                    # current page is a search result page
                    assert isinstance(snippet, Extract)
            else:  # navigate to new position on the current page
                if not view_source:
                    stay_on_current_page = True
                url = curr_page.url
                snippet = None
        new_page: PageContents
        if view_source:
            url = f"{VIEW_SOURCE_PREFIX}{url}"
            snippet = None
        if stay_on_current_page:
            assert curr_page is not None
            new_page = curr_page
        else:
            new_page = await self._open_url(url, direct_url_open)
        self.tool_state.add_page(new_page)
        if loc < 0:  # unset
            if snippet is not None and snippet.line_idx is not None:
                # Start a few lines above the snippet for context.
                loc = snippet.line_idx
                if loc > 4:
                    loc -= 4
            else:
                loc = 0
        yield await self.show_page_safely(loc=loc, num_lines=num_lines)

    @function_the_model_can_call
    @handle_errors
    async def find(self, pattern: str, cursor: int = -1) -> AsyncIterator[Message]:
        """Find *pattern* in the page at *cursor* and push a results page."""
        page = self.tool_state.get_page(cursor)
        # Pages that already carry snippets are search/find result pages.
        if page.snippets is not None:
            raise ToolUsageError(
                "Cannot run `find` on search results page or find results page"
            )
        pc = await run_find_in_page(
            pattern=str(pattern).lower(),
            page=page,
        )
        self.tool_state.add_page(pc)
        yield await self.show_page_safely(loc=0)

    def make_response(
        self,
        content: Content,
        *,
        metadata: dict[str, Any] | None = None,
        author: Author | None = None,
    ) -> Message:
        """
        Make a response message.

        Should be used from `@function_the_model_can_call` if author is not provided.
        """
        # NOTE(review): `metadata` is accepted but not attached to the Message.
        if author is None:
            tool_name = self.get_tool_name()
            function_name = _live_function_name.get()
            assert function_name is not None
            author = Author(role=Role.TOOL, name=f"{tool_name}.{function_name}")
        return Message(
            author=author,
            content=[content],
        ).with_recipient("assistant")

    def process_arguments(self, message: Message) -> dict[str, Any]:
        """Resolve a tool-call message's args, adding the target `url`."""
        function_args = maybe_get_function_args(message, tool_name=self.name)
        if function_args is None:
            raise ValueError("Invalid function arguments")
        if "cursor" in function_args and function_args["cursor"] >= 0:
            page = self.tool_state.get_page(cursor=function_args["cursor"])
            if "id" in function_args:
                function_args["url"] = page.urls[str(function_args["id"])]
            else:
                function_args["url"] = page.url
        elif "id" in function_args and isinstance(function_args["id"], str):
            function_args["url"] = function_args["id"]
        return function_args

    async def _process(self, message: Message) -> AsyncIterator[Message]:
        """Dispatch a `browser.<fn>` call message to search/open/find."""
        def make_error_message(error: str) -> Message:
            return self.make_response(
                content=TextContent(text=json.dumps({"error": error})),
                author=Author(role=Role.TOOL, name=message.recipient),
            )

        function_args = maybe_get_function_args(message, tool_name=self.name)
        if function_args is None:
            yield make_error_message("Invalid function arguments")
            return
        _, function_name = message.recipient.split(".")
        if function_name not in ["search", "open", "find"]:
            yield make_error_message(f"Unknown function: {function_name}")
            return
        if function_name == "search":
            async for msg in self.search(**function_args):
                yield msg
        elif function_name == "open":
            async for msg in self.open(**function_args):
                yield msg
        elif function_name == "find":
            async for msg in self.find(**function_args):
                yield msg
        else:
            raise ValueError("should not be here")

    def normalize_citations(self, old_content: str, hide_partial_citations: bool = False) -> tuple[str, list[dict[str, Any]], bool]:
        """
        Returns a tuple of (new_message, annotations, has_partial_citations)
        - new_message: Message with citations replaced by ([domain](url))
        - annotations: list of dicts with start_index, end_index, and title (url)
        - has_partial_citations: whether the text includes an unfinished citation
        """
        has_partial_citations = PARTIAL_FINAL_LINK_PATTERN.search(old_content) is not None
        if hide_partial_citations and has_partial_citations:
            old_content = PARTIAL_FINAL_LINK_PATTERN.sub("", old_content)
        matches = []
        for match in CITATION_OUTPUT_PATTERN.finditer(old_content):
            cursor = match.group("cursor")
            content = match.group("content")
            start_idx = match.start()
            end_idx = match.end()
            matches.append({
                "cursor": cursor,
                "content": content,
                "start": start_idx,
                "end": end_idx
            })
        # Build a mapping from cursor to url
        cursor_to_url = {}
        for idx, url in enumerate(self.tool_state.page_stack):
            cursor_to_url[str(idx)] = url

        def extract_domain(url):
            # Host component of the URL; falls back to the raw URL on failure.
            try:
                return unquote(url).split("/")[2]
            except Exception:
                return url

        new_content = ""
        last_idx = 0
        annotations = []
        running_offset = 0  # Offset due to length changes in replacements
        # NOTE(review): running_offset is never used below.
        for m in matches:
            cursor = m["cursor"]
            url = cursor_to_url.get(cursor, None)
            orig_start = m["start"]
            orig_end = m["end"]
            # Add text before the citation
            new_content += old_content[last_idx:orig_start]
            if url:
                domain = extract_domain(url)
                replacement = f" ([{domain}]({url})) "
                # The start and end indices in the new content
                start_index = len(new_content)
                end_index = start_index + len(replacement)
                annotations.append({
                    "start_index": start_index,
                    "end_index": end_index,
                    "title": domain,
                    "url": url,
                    "type": "url_citation",
                })
                new_content += replacement
            else:
                # Keep the original citation format if cursor is missing
                replacement = old_content[orig_start:orig_end]
                start_index = len(new_content)
                end_index = start_index + len(replacement)
                # No annotation for missing url, but could add if desired
                new_content += replacement
            last_idx = orig_end
        new_content += old_content[last_idx:]
        return new_content, annotations, has_partial_citations
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tools/simple_browser/simple_browser_tool.py",
"license": "Apache License 2.0",
"lines": 588,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/tools/tool.py | from abc import ABC, abstractmethod
from uuid import UUID, uuid4
from typing import AsyncIterator
from openai_harmony import (
Author,
Role,
Message,
TextContent,
)
def _maybe_update_inplace_and_validate_channel(
    *, input_message: Message, tool_message: Message
) -> None:
    """Propagate the triggering message's channel onto a tool-produced message.

    A tool message with no channel inherits the input's channel in place; a
    tool message with a different, non-None channel is a programming error.
    """
    if tool_message.channel == input_message.channel:
        return
    if tool_message.channel is not None:
        raise ValueError(
            f"Messages from tool should have the same channel ({tool_message.channel=}) as "
            f"the triggering message ({input_message.channel=})."
        )
    tool_message.channel = input_message.channel
class Tool(ABC):
    """
    Something the model can call.

    Tools expose APIs that are shown to the model in a syntax that the model
    understands and knows how to call (from training data). Tools allow the
    model to do things like run code, browse the web, etc.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """
        An identifier for the tool. The convention is that a message will be routed to the tool
        whose name matches its recipient field.
        """

    @property
    def output_channel_should_match_input_channel(self) -> bool:
        """
        A flag which indicates whether the output channel of the tool should match the input channel.
        """
        return True

    async def process(self, message: Message) -> AsyncIterator[Message]:
        """
        Process the message and return a list of messages to add to the conversation.

        The input message should already be applicable to this tool.
        Don't return the input message, just the new messages.

        If implementing a tool that has to block while calling a function use `call_on_background_thread` to get a coroutine.
        If you just want to test this use `evaluate_generator` to get the results.

        Do not override this method; override `_process` below (to avoid interfering with tracing).
        """
        async for m in self._process(message):
            if self.output_channel_should_match_input_channel:
                # Inherit (or validate) the triggering message's channel in place.
                _maybe_update_inplace_and_validate_channel(input_message=message, tool_message=m)
            yield m

    @abstractmethod
    async def _process(self, message: Message) -> AsyncIterator[Message]:
        """Override this method to provide the implementation of the tool."""
        if False:  # This is to convince the type checker that this is an async generator.
            yield  # type: ignore[unreachable]
        _ = message  # Stifle "unused argument" warning.
        raise NotImplementedError

    @abstractmethod
    def instruction(self) -> str:
        """
        Describe the tool's functionality. For example, if it accepts python-formatted code,
        provide documentation on the functions available.
        """
        raise NotImplementedError

    def instruction_dict(self) -> dict[str, str]:
        """Map this tool's name to its instruction text."""
        return {self.name: self.instruction()}

    def error_message(
        self, error_message: str, id: UUID | None = None, channel: str | None = None
    ) -> Message:
        """
        Return an error message that's from this tool.
        """
        return Message(
            id=id if id else uuid4(),
            author=Author(role=Role.TOOL, name=self.name),
            content=TextContent(text=error_message),  # TODO: Use SystemError instead
            channel=channel,
        ).with_recipient("assistant")
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/tools/tool.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/torch/model.py | import json
import math
import os
from dataclasses import dataclass
import torch
import torch.distributed as dist
from gpt_oss.torch.weights import Checkpoint
@dataclass
class ModelConfig:
    """Hyperparameters of the gpt-oss transformer (defaults are the shipped config)."""

    num_hidden_layers: int = 36
    # Mixture-of-experts routing: total experts and top-k selected per token.
    num_experts: int = 128
    experts_per_token: int = 4
    vocab_size: int = 201088
    hidden_size: int = 2880
    intermediate_size: int = 2880
    # Clamp bound applied inside the SwiGLU activation (see swiglu()).
    swiglu_limit: float = 7.0
    head_dim: int = 64
    num_attention_heads: int = 64
    num_key_value_heads: int = 8
    # Window size for the sliding-window attention layers (every other layer).
    sliding_window: int = 128
    # RoPE / YaRN scaling parameters (see RotaryEmbedding).
    initial_context_length: int = 4096
    rope_theta: float = 150000.0
    rope_scaling_factor: float = 32.0
    rope_ntk_alpha: float = 1.0
    rope_ntk_beta: float = 32.0
class RMSNorm(torch.nn.Module):
def __init__(
self, num_features: int, eps: float = 1e-05, device: torch.device | None = None
):
super().__init__()
self.num_features = num_features
self.eps = eps
self.scale = torch.nn.Parameter(
torch.ones(num_features, device=device, dtype=torch.float32)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
assert x.shape[-1] == self.num_features
t, dtype = x.float(), x.dtype
t = t * torch.rsqrt(torch.mean(t**2, dim=-1, keepdim=True) + self.eps)
return (t * self.scale).to(dtype)
def _apply_rotary_emb(
x: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
) -> torch.Tensor:
cos = cos.unsqueeze(-2).to(x.dtype)
sin = sin.unsqueeze(-2).to(x.dtype)
x1, x2 = torch.chunk(x, 2, dim=-1)
o1 = x1 * cos - x2 * sin
o2 = x2 * cos + x1 * sin
return torch.cat((o1, o2), dim=-1)
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embeddings with YaRN-style NTK-by-parts scaling.

    When ``scaling_factor > 1`` the inverse frequencies are blended between
    interpolated and extrapolated values and the cos/sin tables are scaled
    by a "concentration" factor, per the YaRN paper.
    """

    def __init__(
        self,
        head_dim: int,
        base: int,
        dtype: torch.dtype,
        initial_context_length: int = 4096,
        scaling_factor: float = 1.0,
        ntk_alpha: float = 1.0,
        ntk_beta: float = 32.0,
        device: torch.device | None = None,
    ) -> None:
        super().__init__()
        self.head_dim = head_dim
        self.base = base
        self.dtype = dtype
        self.initial_context_length = initial_context_length
        self.scaling_factor = scaling_factor
        self.ntk_alpha = ntk_alpha
        self.ntk_beta = ntk_beta
        self.device = device

    def _compute_concentration_and_inv_freq(self) -> torch.Tensor:
        """See YaRN paper: https://arxiv.org/abs/2309.00071"""
        freq = self.base ** (
            torch.arange(0, self.head_dim, 2, dtype=torch.float, device=self.device)
            / self.head_dim
        )
        if self.scaling_factor > 1.0:
            concentration = (
                0.1 * math.log(self.scaling_factor) + 1.0
            )  # YaRN concentration
            d_half = self.head_dim / 2
            # NTK by parts: dimensions below `low` keep the extrapolated
            # frequency, above `high` use the interpolated one, with a
            # linear ramp blending in between.
            low = (
                d_half
                * math.log(self.initial_context_length / (self.ntk_beta * 2 * math.pi))
                / math.log(self.base)
            )
            high = (
                d_half
                * math.log(self.initial_context_length / (self.ntk_alpha * 2 * math.pi))
                / math.log(self.base)
            )
            assert 0 < low < high < d_half - 1
            interpolation = 1.0 / (self.scaling_factor * freq)
            extrapolation = 1.0 / freq
            ramp = (
                torch.arange(d_half, dtype=torch.float32, device=freq.device) - low
            ) / (high - low)
            mask = 1 - ramp.clamp(0, 1)
            inv_freq = interpolation * (1 - mask) + extrapolation * mask
        else:
            concentration = 1.0
            inv_freq = 1.0 / freq
        return concentration, inv_freq

    def _compute_cos_sin(self, num_tokens: int):
        """Build cos/sin tables for positions [0, num_tokens)."""
        concentration, inv_freq = self._compute_concentration_and_inv_freq()
        t = torch.arange(num_tokens, dtype=torch.float32, device=self.device)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        cos = freqs.cos() * concentration
        sin = freqs.sin() * concentration
        return cos, sin

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Apply rotary embeddings to query and key.

        Both are viewed as (num_tokens, heads, head_dim) for the rotation and
        reshaped back; the position index is the row index (no offset).
        """
        num_tokens = query.shape[0]
        cos, sin = self._compute_cos_sin(num_tokens)
        query_shape = query.shape
        query = query.view(num_tokens, -1, self.head_dim)
        query = _apply_rotary_emb(query, cos, sin)
        query = query.reshape(query_shape)
        key_shape = key.shape
        key = key.view(num_tokens, -1, self.head_dim)
        key = _apply_rotary_emb(key, cos, sin)
        key = key.reshape(key_shape)
        return query, key
def sdpa(Q, K, V, S, sm_scale, sliding_window=0):
    """Causal scaled-dot-product attention with per-head "sink" logits.

    Args:
        Q: (n_tokens, n_heads, q_mult, d_head) queries in grouped-query layout
           (q_mult query heads share each KV head).
        K, V: (n_tokens, n_heads, d_head) keys/values.
        S: sink logits, reshaped to one per (kv head, query-group) pair; they
           are appended as an extra softmax column and dropped afterwards, so
           the weights over real tokens need not sum to 1.
        sm_scale: softmax scale applied to QK^T.
        sliding_window: window size; 0 disables the sliding-window mask.

    Returns:
        (n_tokens, n_heads * q_mult * d_head) attention output.
    """
    # sliding_window == 0 means no sliding window
    n_tokens, n_heads, q_mult, d_head = Q.shape
    assert K.shape == (n_tokens, n_heads, d_head)
    assert V.shape == (n_tokens, n_heads, d_head)
    # Broadcast K/V across the query group and S across query positions.
    K = K[:, :, None, :].expand(-1, -1, q_mult, -1)
    V = V[:, :, None, :].expand(-1, -1, q_mult, -1)
    S = S.reshape(n_heads, q_mult, 1, 1).expand(-1, -1, n_tokens, -1)
    # Causal mask; additionally mask out keys older than the window.
    mask = torch.triu(Q.new_full((n_tokens, n_tokens), -float("inf")), diagonal=1)
    if sliding_window > 0:
        mask += torch.tril(
            mask.new_full((n_tokens, n_tokens), -float("inf")), diagonal=-sliding_window
        )
    QK = torch.einsum("qhmd,khmd->hmqk", Q, K)
    QK *= sm_scale
    QK += mask[None, None, :, :]
    # Append the sink logit as an extra key column, softmax, then drop it.
    QK = torch.cat([QK, S], dim=-1)
    W = torch.softmax(QK, dim=-1)
    W = W[..., :-1]
    attn = torch.einsum("hmqk,khmd->qhmd", W, V)
    return attn.reshape(n_tokens, -1)
class AttentionBlock(torch.nn.Module):
    """Grouped-query attention with rotary embeddings and learned sink logits.

    Layers with an even ``layer_idx`` use sliding-window attention; odd
    layers attend over the full context.
    """

    def __init__(
        self,
        config: ModelConfig,
        layer_idx: int = 0,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.head_dim = config.head_dim
        self.num_attention_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        # Only apply sliding window to every other layer
        self.sliding_window = config.sliding_window if layer_idx % 2 == 0 else 0
        # One extra softmax logit per attention head (consumed by sdpa()).
        self.sinks = torch.nn.Parameter(
            torch.empty(config.num_attention_heads, device=device, dtype=torch.bfloat16)
        )
        self.norm = RMSNorm(config.hidden_size, device=device)
        # Fused Q/K/V projection.
        qkv_dim = config.head_dim * (
            config.num_attention_heads + 2 * config.num_key_value_heads
        )
        self.qkv = torch.nn.Linear(
            config.hidden_size, qkv_dim, device=device, dtype=torch.bfloat16
        )
        self.out = torch.nn.Linear(
            config.head_dim * config.num_attention_heads,
            config.hidden_size,
            device=device,
            dtype=torch.bfloat16,
        )
        self.sm_scale = 1 / math.sqrt(config.head_dim)
        self.rope = RotaryEmbedding(
            config.head_dim,
            config.rope_theta,
            torch.float32,
            initial_context_length=config.initial_context_length,
            scaling_factor=config.rope_scaling_factor,
            ntk_alpha=config.rope_ntk_alpha,
            ntk_beta=config.rope_ntk_beta,
            device=device,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Pre-norm attention with residual; x is (n_tokens, hidden_size)."""
        t = self.norm(x)
        qkv = self.qkv(t)
        # Slice the fused projection into Q, K and V.
        q = qkv[:, : self.num_attention_heads * self.head_dim].contiguous()
        k = qkv[
            :,
            self.num_attention_heads
            * self.head_dim : (self.num_attention_heads + self.num_key_value_heads)
            * self.head_dim,
        ].contiguous()
        v = qkv[
            :,
            (self.num_attention_heads + self.num_key_value_heads)
            * self.head_dim : (self.num_attention_heads + 2 * self.num_key_value_heads)
            * self.head_dim,
        ].contiguous()
        # Group the query heads by the KV head they share (grouped-query attention).
        q = q.view(
            -1,
            self.num_key_value_heads,
            self.num_attention_heads // self.num_key_value_heads,
            self.head_dim,
        )
        k = k.view(-1, self.num_key_value_heads, self.head_dim)
        v = v.view(-1, self.num_key_value_heads, self.head_dim)
        q, k = self.rope(q, k)
        t = sdpa(q, k, v, self.sinks, self.sm_scale, self.sliding_window)
        t = self.out(t)
        t = x + t
        return t
def swiglu(x, alpha: float = 1.702, limit: float = 7.0):
    """SwiGLU over interleaved (gate, linear) feature pairs.

    Even-indexed features along the last dim form the gate half, odd-indexed
    the linear half. The gate is clamped from above and the linear path on
    both sides before the gated product is formed; the linear half carries an
    extra +1 bias, matching the original implementation.
    """
    gate = x[..., 0::2]
    linear = x[..., 1::2]
    gate = gate.clamp(max=limit)
    linear = linear.clamp(min=-limit, max=limit)
    gated = gate * torch.sigmoid(alpha * gate)
    return gated * (linear + 1)
class MLPBlock(torch.nn.Module):
    """Mixture-of-experts SwiGLU MLP with top-k routing.

    When torch.distributed is initialized, expert weights are sharded over
    the intermediate dimension and partial results are all-reduced.
    """

    def __init__(
        self,
        config: ModelConfig,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.num_experts = config.num_experts
        self.experts_per_token = config.experts_per_token
        self.swiglu_limit = config.swiglu_limit
        self.world_size = dist.get_world_size() if dist.is_initialized() else 1
        self.norm = RMSNorm(config.hidden_size, device=device)
        # Router: one logit per expert.
        self.gate = torch.nn.Linear(
            config.hidden_size, config.num_experts, device=device, dtype=torch.bfloat16
        )
        assert config.intermediate_size % self.world_size == 0
        # mlp1 produces 2x intermediate features: the interleaved gate and
        # linear halves consumed by swiglu().
        self.mlp1_weight = torch.nn.Parameter(
            torch.empty(
                (
                    config.num_experts,
                    config.intermediate_size * 2 // self.world_size,
                    config.hidden_size,
                ),
                device=device,
                dtype=torch.bfloat16,
            )
        )
        self.mlp1_bias = torch.nn.Parameter(
            torch.empty(
                (config.num_experts, config.intermediate_size * 2 // self.world_size),
                device=device,
                dtype=torch.bfloat16,
            )
        )
        self.mlp2_weight = torch.nn.Parameter(
            torch.empty(
                (
                    config.num_experts,
                    config.hidden_size,
                    config.intermediate_size // self.world_size,
                ),
                device=device,
                dtype=torch.bfloat16,
            )
        )
        self.mlp2_bias = torch.nn.Parameter(
            torch.empty(
                (config.num_experts, config.hidden_size),
                device=device,
                dtype=torch.bfloat16,
            )
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Route each token to its top-k experts and mix their outputs; x is (n_tokens, hidden)."""
        t = self.norm(x)
        g = self.gate(t)
        experts = torch.topk(g, k=self.experts_per_token, dim=-1, sorted=True)
        # Softmax over the selected experts only (dim=1 is the k dimension
        # of the (n_tokens, k) top-k values).
        expert_weights = torch.nn.functional.softmax(experts.values, dim=1)
        expert_indices = experts.indices
        # MLP #1
        mlp1_weight = self.mlp1_weight[expert_indices, ...]
        mlp1_bias = self.mlp1_bias[expert_indices, ...]
        t = torch.einsum("beck,bk->bec", mlp1_weight, t) + mlp1_bias
        t = swiglu(t, limit=self.swiglu_limit)
        # MLP #2
        mlp2_weight = self.mlp2_weight[expert_indices, ...]
        mlp2_bias = self.mlp2_bias[expert_indices, ...]
        t = torch.einsum("beck,bek->bec", mlp2_weight, t)
        if self.world_size > 1:
            # Sum the per-rank partial products before applying the bias.
            dist.all_reduce(t, op=dist.ReduceOp.SUM)
        t += mlp2_bias
        # Weighted sum of experts
        t = torch.einsum("bec,be->bc", t, expert_weights)
        return x + t
class TransformerBlock(torch.nn.Module):
    """One transformer layer: attention then MoE MLP, each adding its own residual."""

    def __init__(
        self,
        config: ModelConfig,
        layer_idx: int,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.layer_idx = layer_idx
        self.attn = AttentionBlock(config, layer_idx, device)
        self.mlp = MLPBlock(config, device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual connections live inside the sub-blocks.
        return self.mlp(self.attn(x))
class Transformer(torch.nn.Module):
    """Token-ids-in / logits-out decoder: embedding, blocks, final norm, unembedding."""

    def __init__(
        self,
        config: ModelConfig,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.embedding = torch.nn.Embedding(
            config.vocab_size, config.hidden_size, device=device, dtype=torch.bfloat16
        )
        self.block = torch.nn.ModuleList(
            [
                TransformerBlock(config, layer_idx, device)
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        self.norm = RMSNorm(config.hidden_size, device=device)
        self.unembedding = torch.nn.Linear(
            config.hidden_size,
            config.vocab_size,
            bias=False,
            device=device,
            dtype=torch.bfloat16,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map token ids (n_tokens,) to logits (n_tokens, vocab_size)."""
        x = self.embedding(x)
        for block in self.block:
            x = block(x)
        x = self.norm(x)
        x = self.unembedding(x)
        return x

    @staticmethod
    def from_checkpoint(
        path: str, device: str | torch.device = "cuda"
    ) -> "Transformer":
        """Build a Transformer from a checkpoint dir (config.json + safetensors).

        When torch.distributed is initialized, each rank loads only its shard
        of the MoE weights along the intermediate dimension.
        """
        if not isinstance(device, torch.device):
            device = torch.device(device)
        config_path = os.path.join(path, "config.json")
        with open(config_path, "r") as f:
            json_config = json.load(f)
        config = ModelConfig(**json_config)
        model = Transformer(
            config=config,
            device=device,
        )
        model.eval()
        # Load weights
        my_rank = dist.get_rank() if dist.is_initialized() else 0
        world_size = dist.get_world_size() if dist.is_initialized() else 1
        per_rank_intermediate_size = config.intermediate_size // world_size
        checkpoint = Checkpoint(path, device)
        for name, param in model.named_parameters():
            loaded_tensor = checkpoint.get(name)
            # Note: it would be more efficient to do sharding before upcasting from MXFP4,
            # but for simplicity we do it after.
            if "mlp1" in name:  # both weight and bias
                loaded_tensor = loaded_tensor[
                    :,
                    my_rank * 2
                    * per_rank_intermediate_size : (my_rank + 1) * 2
                    * per_rank_intermediate_size,
                    ...,
                ]
            elif "mlp2_weight" in name:  # only weight
                loaded_tensor = loaded_tensor[
                    ...,
                    my_rank
                    * per_rank_intermediate_size : (my_rank + 1)
                    * per_rank_intermediate_size,
                ]
            try:
                param.data.copy_(loaded_tensor)
            except Exception:
                # Was a bare `except:`, which would also intercept
                # KeyboardInterrupt/SystemExit. Log the shape mismatch for
                # debugging, then re-raise.
                print(f"{name=} {param.data.shape=} {loaded_tensor.shape=}")
                raise
        return model
class TokenGenerator:
    """Simple autoregressive sampler that re-runs the full model every step (no KV cache)."""

    @torch.inference_mode()
    def __init__(self, checkpoint: str, device: torch.device):
        self.device = device
        self.model = Transformer.from_checkpoint(checkpoint, device=self.device)

    @torch.inference_mode()
    def generate(self,
                 prompt_tokens: list[int],
                 stop_tokens: list[int],
                 temperature: float = 1.0,
                 max_tokens: int = 0,
                 return_logprobs: bool = False):
        """Yield sampled tokens (optionally paired with their logprob).

        Stops after max_tokens (0 = unlimited) or after yielding a stop token.
        Each step re-encodes the whole sequence, so cost grows quadratically
        with output length.
        """
        tokens = list(prompt_tokens)
        num_generated_tokens = 0
        while max_tokens == 0 or num_generated_tokens < max_tokens:
            # Full forward pass; keep only the logits of the last position.
            logits = self.model(torch.as_tensor(tokens, dtype=torch.int32, device=self.device))[-1]
            if temperature == 0.0:
                # Greedy decoding.
                predicted_token = torch.argmax(logits, dim=-1).item()
            else:
                probs = torch.softmax(logits * (1.0 / temperature), dim=-1)
                predicted_token = torch.multinomial(probs, num_samples=1).item()
            tokens.append(predicted_token)
            num_generated_tokens += 1
            if return_logprobs:
                # Logprob of the sampled token under the (untempered) distribution.
                logprobs = torch.log_softmax(logits, dim=-1)
                selected_logprobs = logprobs[predicted_token].item()
                yield predicted_token, selected_logprobs
            else:
                yield predicted_token
            # A stop token is yielded before terminating.
            if predicted_token in stop_tokens:
                break
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/torch/model.py",
"license": "Apache License 2.0",
"lines": 422,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/torch/utils.py | import os
import torch
import torch.distributed as dist
def suppress_output(rank):
    """Suppress printing on the current device. Force printing with `force=True`."""
    import builtins

    original_print = builtins.print

    def print(*args, **kwargs):
        # `force=True` prints from any rank, prefixed with the rank number;
        # otherwise only rank 0 prints.
        if kwargs.pop('force', False):
            original_print("rank #%d:" % rank, *args, **kwargs)
        elif rank == 0:
            original_print(*args, **kwargs)

    builtins.print = print
def init_distributed() -> torch.device:
    """Initialize the model for distributed inference.

    Reads WORLD_SIZE/RANK from the environment, joins an NCCL process group
    when WORLD_SIZE > 1, runs one warm-up all-reduce, and silences stdout on
    non-zero ranks. Returns this rank's CUDA device.
    """
    # Initialize distributed inference
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    rank = int(os.environ.get("RANK", 0))
    if world_size > 1:
        dist.init_process_group(
            backend="nccl", init_method="env://", world_size=world_size, rank=rank
        )
    # Assumes one CUDA device per rank.
    torch.cuda.set_device(rank)
    device = torch.device(f"cuda:{rank}")
    # Warm up NCCL to avoid first-time latency
    if world_size > 1:
        x = torch.ones(1, device=device)
        dist.all_reduce(x)
        torch.cuda.synchronize(device)
    suppress_output(rank)
    return device
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/torch/utils.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/torch/weights.py | import math
import os
import torch
from safetensors import safe_open
# Bytes per MXFP4 block: 32 FP4 numbers packed in 16 bytes
BYTES_PER_BLOCK = 16

# Value of each 4-bit FP4 code. The top bit is the sign, so codes 8..15 are
# the negations of codes 0..7.
FP4_VALUES = [
    +0.0, +0.5, +1.0, +1.5, +2.0, +3.0, +4.0, +6.0,
    -0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0,
]

# Map the names assumed in this implementation to the checkpoint names.
# MoE weights resolve to a (blocks, scales) pair of MXFP4 tensor names;
# biases map to themselves. Assumes at most 36 transformer blocks.
PARAM_NAME_MAP = {
    f"block.{n}.mlp.mlp1_bias": f"block.{n}.mlp.mlp1_bias" for n in range(36)
} | {
    f"block.{n}.mlp.mlp1_weight": (f"block.{n}.mlp.mlp1_weight.blocks", f"block.{n}.mlp.mlp1_weight.scales") for n in range(36)
} | {
    f"block.{n}.mlp.mlp2_bias": f"block.{n}.mlp.mlp2_bias" for n in range(36)
} | {
    f"block.{n}.mlp.mlp2_weight": (f"block.{n}.mlp.mlp2_weight.blocks", f"block.{n}.mlp.mlp2_weight.scales") for n in range(36)
}
class Checkpoint:
def __init__(self, path: str, device: torch.device):
device_str = (
device.type
if device.index is None
else device.type + ":" + str(device.index)
)
self.device_str = device_str
# Read from all files ending with .safetensors in the checkpoint directory
safetensor_files = [
os.path.join(path, fname)
for fname in os.listdir(path)
if fname.endswith(".safetensors")
]
# Build a mapping from tensor name to (file, key)
tensor_name_to_file = {}
for safetensor_file in safetensor_files:
with safe_open(safetensor_file, framework="pt", device=device_str) as f:
for key in f.keys():
tensor_name_to_file[key] = safetensor_file
self.tensor_name_to_file = tensor_name_to_file
def get(self, name: str) -> torch.Tensor:
match PARAM_NAME_MAP.get(name, name):
case (blocks_name, scales_name):
# MoE weights: are in block-based MXFP4 format
return self._get_mxfp4_tensor(blocks_name, scales_name, dtype=torch.bfloat16)
case tensor_name:
# MoE biases and other weights
return self._get_tensor(tensor_name)
def _get_tensor(self, name: str) -> str:
assert name in self.tensor_name_to_file, f"Tensor {name} not found in checkpoint."
with safe_open(
self.tensor_name_to_file[name], framework="pt", device=self.device_str
) as f:
return f.get_tensor(name)
def _get_mxfp4_tensor(
self,
blocks_name: str,
scales_name: str,
*,
dtype: torch.dtype = torch.bfloat16,
rows_per_chunk: int = 16384 * 512,
) -> torch.Tensor:
assert blocks_name in self.tensor_name_to_file, (
f"Blocks tensor {blocks_name} not found in checkpoint."
)
assert scales_name in self.tensor_name_to_file, (
f"Scales tensor {scales_name} not found in checkpoint."
)
blocks = self._get_tensor(blocks_name)
scales = self._get_tensor(scales_name).to(torch.int32) - 127
assert blocks.shape[:-1] == scales.shape, (
f"{blocks.shape=} does not match {scales.shape=}"
)
lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device)
*prefix_shape, G, B = blocks.shape
rows_total = math.prod(prefix_shape) * G
blocks = blocks.reshape(rows_total, B)
scales = scales.reshape(rows_total, 1)
out = torch.empty(rows_total, B * 2, dtype=dtype, device=blocks.device)
for r0 in range(0, rows_total, rows_per_chunk):
r1 = min(r0 + rows_per_chunk, rows_total)
blk = blocks[r0:r1]
exp = scales[r0:r1]
# nibble indices -> int64
idx_lo = (blk & 0x0F).to(torch.long)
idx_hi = (blk >> 4).to(torch.long)
sub = out[r0:r1]
sub[:, 0::2] = lut[idx_lo]
sub[:, 1::2] = lut[idx_hi]
torch.ldexp(sub, exp, out=sub)
del idx_lo, idx_hi, blk, exp
return out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2)
def _get_mxfp4_tensor_copy(self, blocks_name: str, scales_name: str, dtype: torch.dtype = torch.bfloat16):
"short version that uses a lot of memory"
loaded_blocks = self._get_tensor(blocks_name)
# Split it into low and high nibbles, upcast to bytes, and interleave (for swiglu)
loaded_blocks_lo = loaded_blocks & 0x0F
loaded_blocks_hi = loaded_blocks >> 4
loaded_blocks = torch.stack((loaded_blocks_lo, loaded_blocks_hi), dim=-1)
loaded_blocks = loaded_blocks.view(*loaded_blocks.shape[:-2], loaded_blocks.shape[-2] * 2)
loaded_scales = self._get_tensor(scales_name)
# Upcast to int32 and subtract bias
loaded_scales = loaded_scales.int() - 127
# Convert MXFP4 numbers into target dtype
fp4_values = torch.tensor(FP4_VALUES, dtype=dtype, device=self.device_str)
loaded_tensor = torch.ldexp(fp4_values[loaded_blocks.int()], loaded_scales.unsqueeze(-1))
loaded_tensor = loaded_tensor.view(*loaded_tensor.shape[:-2], -1)
return loaded_tensor
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/torch/weights.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/triton/model.py | import json
import math
import os
import torch
from torch.profiler import record_function
from gpt_oss.torch.model import ModelConfig, RMSNorm
from gpt_oss.torch.weights import Checkpoint
from gpt_oss.triton.attention import attention, attention_ref
from gpt_oss.triton.moe import quantize_mx4, moe
class RotaryEmbedding(torch.nn.Module):
    """Rotary embeddings with YaRN NTK-by-parts scaling and precomputed tables.

    Unlike the reference torch implementation, the cos/sin tables are built
    once for ``max_context_length`` positions and indexed by an explicit
    position offset in forward().
    """

    def __init__(
        self,
        head_dim: int,
        base: int,
        dtype: torch.dtype,
        initial_context_length: int = 4096,
        max_context_length: int = 131072,
        scaling_factor: float = 1.0,
        ntk_alpha: float = 1.0,
        ntk_beta: float = 32.0,
        device: torch.device | None = None,
    ) -> None:
        super().__init__()
        self.head_dim = head_dim
        self.base = base
        self.dtype = dtype
        self.initial_context_length = initial_context_length
        self.max_context_length = max_context_length
        self.scaling_factor = scaling_factor
        self.ntk_alpha = ntk_alpha
        self.ntk_beta = ntk_beta
        self.device = device
        # Precompute tables for every position up to max_context_length.
        self.cos, self.sin = self._compute_cos_sin(0, self.max_context_length)

    def _compute_concentration_and_inv_freq(self) -> torch.Tensor:
        """See YaRN paper: https://arxiv.org/abs/2309.00071"""
        freq = self.base ** (
            torch.arange(0, self.head_dim, 2, dtype=torch.float, device=self.device)
            / self.head_dim
        )
        if self.scaling_factor > 1.0:
            concentration = (
                0.1 * math.log(self.scaling_factor) + 1.0
            )  # YaRN concentration
            d_half = self.head_dim / 2
            # NTK by parts: extrapolate below `low`, interpolate above `high`,
            # and blend linearly in between.
            low = (
                d_half
                * math.log(self.initial_context_length / (self.ntk_beta * 2 * math.pi))
                / math.log(self.base)
            )
            high = (
                d_half
                * math.log(self.initial_context_length / (self.ntk_alpha * 2 * math.pi))
                / math.log(self.base)
            )
            assert 0 < low < high < d_half - 1
            interpolation = 1.0 / (self.scaling_factor * freq)
            extrapolation = 1.0 / freq
            ramp = (
                torch.arange(d_half, dtype=torch.float32, device=freq.device) - low
            ) / (high - low)
            mask = 1 - ramp.clamp(0, 1)
            inv_freq = interpolation * (1 - mask) + extrapolation * mask
        else:
            concentration = 1.0
            inv_freq = 1.0 / freq
        return concentration, inv_freq

    def _compute_cos_sin(self, start: int, num_tokens: int):
        """Build cos/sin tables for positions [start, start + num_tokens)."""
        concentration, inv_freq = self._compute_concentration_and_inv_freq()
        t = torch.arange(start, start + num_tokens, dtype=torch.float32, device=self.device)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        cos = freqs.cos() * concentration
        sin = freqs.sin() * concentration
        return cos, sin

    @record_function("rotate")
    def _rotate(
        self,
        x: torch.Tensor,
        cos: torch.Tensor,
        sin: torch.Tensor,
    ) -> torch.Tensor:
        """Rotate the two halves of the last dim; x is (batch, tokens, heads, head_dim)."""
        cos = cos[None, :, None, :].to(x.dtype)
        sin = sin[None, :, None, :].to(x.dtype)
        x1, x2 = torch.chunk(x, 2, dim=-1)
        o1 = x1 * cos - x2 * sin
        o2 = x2 * cos + x1 * sin
        return torch.cat((o1, o2), dim=-1)

    @record_function("rope")
    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        offset: torch.LongTensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Apply rotary embeddings at absolute positions offset..offset+num_tokens.

        Indices wrap modulo max_context_length rather than overrunning the
        precomputed tables.
        """
        batch_size, num_tokens, num_heads, head_dim = query.shape
        batch_size, num_tokens, num_key_value_heads, head_dim = key.shape
        idx = torch.arange(num_tokens, device=query.device, dtype=torch.long) + offset
        idx = idx % self.max_context_length
        cos = self.cos.index_select(0, idx)
        sin = self.sin.index_select(0, idx)
        query = self._rotate(query, cos, sin)
        key = self._rotate(key, cos, sin)
        return query, key
class Cache:
def __init__(self, batch_size, n_ctx, n_kv_heads, d_head=64, device: torch.device | None = None):
self.k = torch.zeros((batch_size, n_ctx, n_kv_heads, d_head), dtype=torch.bfloat16, device=device)
self.v = torch.zeros((batch_size, n_ctx, n_kv_heads, d_head), dtype=torch.bfloat16, device=device)
self.offset = torch.zeros((1,), dtype=torch.long, device=device)
def reset(self):
self.k.zero_()
self.v.zero_()
self.offset.zero_()
def repeat_interleave(self, n):
"""Repeat each cache entry n times along the batch dimension."""
self.k = self.k.repeat_interleave(n, dim=0)
self.v = self.v.repeat_interleave(n, dim=0)
def truncate(self, n_ctx):
"""Truncate the cache to the first n_ctx tokens."""
batch_size, _, n_kv_heads, d_head = self.k.shape
assert batch_size == self.v.shape[0]
assert n_ctx <= self.k.shape[1]
self.k[:, n_ctx:, :, :].zero_()
self.v[:, n_ctx:, :, :].zero_()
self.offset.fill_(n_ctx)
return self.k, self.v
def extend(self, k, v):
batch_size, n_ctx, *_rest = k.shape
assert batch_size == self.k.shape[0]
indices = torch.arange(0, n_ctx, device=k.device, dtype=torch.long) + self.offset
self.k.index_copy_(1, indices, k)
self.v.index_copy_(1, indices, v)
self.offset.add_(n_ctx)
return self.k, self.v
class AttentionBlock(torch.nn.Module):
    """Grouped-query attention using the Triton attention kernels and a KV cache.

    Even layers use sliding-window attention; odd layers see the full context.
    """

    def __init__(
        self,
        config: ModelConfig,
        layer_idx: int = 0,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.head_dim = config.head_dim
        self.num_attention_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        # Only apply sliding window to every other layer
        self.sliding_window = config.sliding_window if layer_idx % 2 == 0 else 0
        self.layer_idx = layer_idx
        # Learned per-head sink logits passed through to the attention kernels.
        self.sinks = torch.nn.Parameter(
            torch.empty(config.num_attention_heads, device=device, dtype=torch.bfloat16)
        )
        self.norm = RMSNorm(config.hidden_size, device=device)
        # Fused Q/K/V projection.
        qkv_dim = config.head_dim * (
            config.num_attention_heads + 2 * config.num_key_value_heads
        )
        self.qkv = torch.nn.Linear(
            config.hidden_size, qkv_dim, device=device, dtype=torch.bfloat16
        )
        self.out = torch.nn.Linear(
            config.head_dim * config.num_attention_heads,
            config.hidden_size,
            device=device,
            dtype=torch.bfloat16,
        )
        self.sm_scale = 1 / math.sqrt(config.head_dim)
        self.rope = RotaryEmbedding(
            config.head_dim,
            config.rope_theta,
            torch.float32,
            initial_context_length=config.initial_context_length,
            scaling_factor=config.rope_scaling_factor,
            ntk_alpha=config.rope_ntk_alpha,
            ntk_beta=config.rope_ntk_beta,
            device=device,
        )

    @record_function("attn")
    def forward(self, x: torch.Tensor, cache: Cache | None = None) -> torch.Tensor:
        """Pre-norm attention with residual; extends `cache` in place when given."""
        batch_size, n_ctx, dim = x.shape
        t = self.norm(x)
        with record_function("qkv"):
            qkv = self.qkv(t)
        qkv_parts = (
            self.num_attention_heads * self.head_dim,
            self.num_key_value_heads * self.head_dim,
            self.num_key_value_heads * self.head_dim
        )
        q, k, v = torch.split(qkv, qkv_parts, dim=-1)
        q, k, v = q.contiguous(), k.contiguous(), v.contiguous()
        q = q.view(batch_size, n_ctx, self.num_attention_heads, self.head_dim)
        k = k.view(batch_size, n_ctx, self.num_key_value_heads, self.head_dim)
        v = v.view(batch_size, n_ctx, self.num_key_value_heads, self.head_dim)
        if cache is not None:
            # Rotate at the cache's current absolute position, then append;
            # k/v come back as the full cached sequences.
            offset = cache.offset.clone()
            q, k = self.rope(q, k, offset=offset)
            k, v = cache.extend(k, v)
        else:
            offset = torch.zeros((1,), dtype=torch.long, device=x.device)
            q, k = self.rope(q, k, offset=offset)
        # Group query heads by the KV head they share.
        q = q.view(
            batch_size,
            n_ctx,
            self.num_attention_heads // self.num_key_value_heads,
            self.num_key_value_heads,
            self.head_dim,
        )
        with record_function("attn_kernel"):
            if n_ctx == 1:
                # Single-token decode path uses the reference kernel.
                t = attention_ref(
                    q,
                    k,
                    v,
                    self.sinks,
                    self.sm_scale,
                    self.sliding_window,
                    offset,
                )
            else:
                t = attention(
                    q,
                    k,
                    v,
                    self.sinks,
                    self.sm_scale,
                    self.sliding_window,
                    offset,
                )
                if n_ctx < 64:
                    # NOTE(review): debug cross-check of the Triton kernel
                    # against the reference for short prefills; runs the
                    # attention a second time and keeps the reference result.
                    t1 = attention_ref(
                        q,
                        k,
                        v,
                        self.sinks,
                        self.sm_scale,
                        self.sliding_window,
                        offset,
                    )
                    torch.testing.assert_close(t, t1)
                    t = t1
        with record_function("c_proj"):
            t = self.out(t)
        t = x + t
        return t
class MLPBlock(torch.nn.Module):
    """MoE SwiGLU MLP backed by the fused Triton `moe` kernel with MXFP4 weights."""

    def __init__(
        self,
        config: ModelConfig,
        layer_idx: int = 0,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.layer_idx = layer_idx
        self.num_experts = config.num_experts
        self.experts_per_token = config.experts_per_token
        self.swiglu_limit = config.swiglu_limit
        self.norm = RMSNorm(config.hidden_size, device=device)
        # Router parameters, stored (hidden, experts) — transposed relative
        # to an nn.Linear weight (see Transformer.from_checkpoint).
        self.gate = torch.nn.ParameterDict({
            "weight": torch.nn.Parameter(
                torch.empty(
                    (config.hidden_size, config.num_experts),
                    device=device,
                    dtype=torch.bfloat16,
                )
            ),
            "bias": torch.nn.Parameter(
                torch.empty(
                    (config.num_experts,),
                    device=device,
                    dtype=torch.bfloat16,
                )
            ),
        })
        # Expert weights are kept MXFP4-quantized. The quantized storage is
        # registered as a Parameter (so checkpoint loading can copy into it);
        # the matching scales live on the module as *_mx attributes.
        self.mlp1_weight_tensor, self.mlp1_weight_mx = quantize_mx4(
            torch.empty(
                (
                    config.num_experts,
                    config.hidden_size,
                    config.intermediate_size * 2,
                ),
                device=device,
                dtype=torch.bfloat16,
            ),
        )
        self.mlp1_weight = torch.nn.Parameter(self.mlp1_weight_tensor.storage.data, requires_grad=False)
        self.mlp1_bias = torch.nn.Parameter(
            torch.empty(
                (config.num_experts, config.intermediate_size * 2),
                device=device,
                dtype=torch.bfloat16,
            )
        )
        self.mlp2_weight_tensor, self.mlp2_weight_mx = quantize_mx4(
            torch.empty(
                (
                    config.num_experts,
                    config.intermediate_size,
                    config.hidden_size,
                ),
                device=device,
                dtype=torch.bfloat16,
            ),
        )
        self.mlp2_weight = torch.nn.Parameter(self.mlp2_weight_tensor.storage.data, requires_grad=False)
        self.mlp2_bias = torch.nn.Parameter(
            torch.empty(
                (config.num_experts, config.hidden_size),
                device=device,
                dtype=torch.bfloat16,
            )
        )

    @record_function("mlp")
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Flatten (batch, n_ctx) tokens, run the fused MoE kernel, add residual."""
        batch_size, n_ctx, dim = x.shape
        t = self.norm(x)
        t = t.view(batch_size * n_ctx, dim)
        # Routing, expert matmuls and swiglu all happen inside the kernel.
        t = moe(
            t,
            self.gate["weight"],
            self.mlp1_weight_tensor, self.mlp1_weight_mx,
            self.mlp2_weight_tensor, self.mlp2_weight_mx,
            self.gate["bias"].float(),
            self.mlp1_bias.float(),
            self.mlp2_bias.float(),
            experts_per_token=self.experts_per_token,
            num_experts=self.num_experts,
            swiglu_limit=self.swiglu_limit,
        )
        t = t.view(batch_size, n_ctx, dim)
        return x + t
class TransformerBlock(torch.nn.Module):
    """One decoder layer: attention (with optional KV cache) then the MoE MLP."""

    def __init__(
        self,
        config: ModelConfig,
        layer_idx: int,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.layer_idx = layer_idx
        self.attn = AttentionBlock(config, layer_idx, device)
        self.mlp = MLPBlock(config, layer_idx, device)

    def forward(self, x: torch.Tensor, cache: Cache | None = None) -> torch.Tensor:
        # Residual connections are applied inside each sub-block.
        return self.mlp(self.attn(x, cache=cache))
class Transformer(torch.nn.Module):
    """Triton-kernel variant of the decoder; supports per-layer KV caches."""

    def __init__(
        self,
        config: ModelConfig,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.config = config
        self.embedding = torch.nn.Embedding(
            config.vocab_size, config.hidden_size, device=device, dtype=torch.bfloat16
        )
        self.block = torch.nn.ModuleList(
            [
                TransformerBlock(config, layer_idx, device)
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        self.norm = RMSNorm(config.hidden_size, device=device)
        self.unembedding = torch.nn.Linear(
            config.hidden_size,
            config.vocab_size,
            bias=False,
            device=device,
            dtype=torch.bfloat16,
        )

    def forward(self, x: torch.Tensor, caches: list[Cache] | None = None) -> torch.Tensor:
        """Map token ids (batch, n_ctx) to float32 logits; `caches` is one Cache per layer."""
        caches = caches or [None] * len(self.block)
        with record_function("embedding"):
            x = self.embedding(x)
        for block, cache in zip(self.block, caches):
            with record_function("block"):
                x = block(x, cache=cache)
        with record_function("norm_f"):
            x = self.norm(x)
        with record_function("unembedding"):
            x = self.unembedding(x)
        return x.float()

    @staticmethod
    def from_checkpoint(
        path: str, config: ModelConfig | None = None, device: str | torch.device = "cuda",
    ) -> "Transformer":
        """Load a checkpoint, re-quantizing MoE weights to MXFP4 for the Triton kernels.

        When `config` is None it is read from `path`/config.json.
        """
        if not isinstance(device, torch.device):
            device = torch.device(device)
        if config is None:
            config_path = os.path.join(path, "config.json")
            with open(config_path, "r") as f:
                json_config = json.load(f)
            config = ModelConfig(**json_config)
        model = Transformer(config=config, device=device)
        model.eval()
        checkpoint = Checkpoint(path, device)
        for name, param in model.named_parameters():
            torch.cuda.empty_cache()
            loaded_tensor = checkpoint.get(name)
            if "mlp1" in name:
                if "weight" in name:
                    # Transpose to the kernel layout and quantize; the MX
                    # scales live on the module (not in named_parameters),
                    # so they are assigned directly.
                    loaded_tensor, scales = quantize_mx4(loaded_tensor.mT.contiguous())
                    _, block_index, _, _ = name.split(".")
                    model.block[int(block_index)].mlp.mlp1_weight_mx = scales
                    param.data.copy_(loaded_tensor.storage.data)
                else:
                    param.data.copy_(loaded_tensor)
            elif "mlp2_weight" in name:
                loaded_tensor, scales = quantize_mx4(loaded_tensor.mT.contiguous())
                _, block_index, _, _ = name.split(".")
                model.block[int(block_index)].mlp.mlp2_weight_mx = scales
                param.data.copy_(loaded_tensor.storage.data)
            elif "gate" in name and loaded_tensor.ndim == 2:
                # Router weight is stored transposed relative to this module.
                loaded_tensor = loaded_tensor.mT.contiguous()
                param.data.copy_(loaded_tensor)
            else:
                param.data.copy_(loaded_tensor)
            # NOTE: Required to avoid OOM errors
            torch.cuda.empty_cache()
        return model
class TokenGenerator:
    """Sampler that prefills per-layer KV caches and replays a captured CUDA graph per token."""

    @torch.inference_mode()
    def __init__(self, checkpoint: str, context: int, device: torch.device):
        self.device = device
        self.model = Transformer.from_checkpoint(checkpoint, device=self.device)
        # One KV cache per layer, batch size 1, capacity `context` tokens.
        self.caches = [Cache(1, context, self.model.config.num_key_value_heads, device=self.device) for _ in range(len(self.model.block))]
        self.input_token = torch.zeros(1, dtype=torch.int32, device=self.device)
        # warmup
        self.model(self.input_token[None, :], caches=self.caches)
        # capture for sampling
        # The captured graph performs one single-token forward; its fixed
        # input/output tensors are self.input_token and self.logits.
        self.graph = torch.cuda.CUDAGraph()
        with torch.cuda.graph(self.graph):
            self.logits = self.model(self.input_token[None, :], caches=self.caches)[0]

    @torch.inference_mode()
    def generate(self,
                 prompt_tokens: list[int],
                 stop_tokens: list[int] | None = None,
                 temperature: float = 1.0,
                 max_tokens: int = 0,
                 return_logprobs: bool = False):
        """Yield sampled tokens until a stop token is produced or max_tokens (0 = unlimited)."""
        stop_tokens = stop_tokens or []
        for cache in self.caches:
            cache.reset()
        prompt_tokens = torch.as_tensor(prompt_tokens, dtype=torch.int32, device=self.device)
        # Prefill the caches with all but the last prompt token; the last
        # token is fed through the captured single-token graph below.
        self.model(prompt_tokens[None, :-1], self.caches)
        predicted_token = prompt_tokens[-1]
        num_generated_tokens = 0
        while max_tokens == 0 or num_generated_tokens < max_tokens:
            # Update the graph's fixed input in place and replay it.
            self.input_token[0] = predicted_token
            self.graph.replay()
            if temperature == 0.0:
                # Greedy decoding.
                predicted_token = torch.argmax(self.logits[-1, :], dim=-1).item()
            else:
                probs = torch.softmax(self.logits * (1.0 / temperature), dim=-1)
                predicted_token = torch.multinomial(probs[-1, :], num_samples=1).item()
            num_generated_tokens += 1
            if return_logprobs:
                # Logprob of the sampled token under the untempered distribution.
                logprobs = torch.log_softmax(self.logits[-1, :], dim=-1)
                selected_logprobs = logprobs[predicted_token].item()
                yield predicted_token, selected_logprobs
            else:
                yield predicted_token
            # A stop token is yielded before terminating.
            if predicted_token in stop_tokens:
                break
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/triton/model.py",
"license": "Apache License 2.0",
"lines": 461,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/gpt-oss:gpt_oss/triton/moe.py | import torch
from torch.profiler import record_function
import triton_kernels
import triton_kernels.swiglu
from triton_kernels.numerics_details.mxfp import downcast_to_mxfp
from triton_kernels.matmul_ogs import PrecisionConfig, FlexCtx, FnSpecs, FusedActivation
from triton_kernels.matmul_ogs import matmul_ogs
from triton_kernels.numerics import InFlexData
from triton_kernels.routing import routing
from triton_kernels.tensor import convert_layout
from triton_kernels.tensor_details.layout import StridedLayout, HopperMXScaleLayout, HopperMXValueLayout
from triton_kernels.tensor import wrap_torch_tensor, FP4
def quantize_mx4(w):
    """Quantize a weight tensor to MXFP4 and return (values, scales) in kernel-ready layouts."""
    values, scales = downcast_to_mxfp(w.to(torch.bfloat16), torch.uint8, axis=1)
    packed_values = convert_layout(wrap_torch_tensor(values, dtype=FP4), HopperMXValueLayout, mx_axis=1)
    packed_scales = convert_layout(wrap_torch_tensor(scales), StridedLayout)
    return packed_values, packed_scales
def swiglu(x, alpha: float = 1.702, limit: float = 7.0, interleaved: bool = True):
    """Clamped SwiGLU activation.

    The last dimension is split into a gated half and a linear half —
    even/odd columns when `interleaved`, contiguous halves otherwise.
    Both halves are clamped, then the result is gate*sigmoid(alpha*gate)
    multiplied by (linear + 1). Output has half the last-dim size of x.
    """
    if interleaved:
        gate = x[..., ::2]
        linear = x[..., 1::2]
    else:
        gate, linear = torch.chunk(x, 2, dim=-1)
    # The gate is clamped only from above; the linear term from both sides.
    gate = torch.clamp(gate, max=limit)
    linear = torch.clamp(linear, min=-limit, max=limit)
    gated = gate * torch.sigmoid(alpha * gate)
    return gated * (linear + 1)
def moe(x, wg, w1, w1_mx, w2, w2_mx, bg, b1, b2, experts_per_token=4, num_experts=128, swiglu_limit=7.0, fused_act=True, interleaved=True):
    """Mixture-of-experts forward pass built on triton_kernels' matmul_ogs.

    Args:
        x: input activations.
        wg, bg: router (gate) weight and bias.
        w1, b1: first expert matmul weight/bias; w1_mx holds its MX scales.
        w2, b2: second expert matmul weight/bias; w2_mx holds its MX scales.
        experts_per_token: top-k experts routed per token.
        num_experts: NOTE(review): not referenced in this body — routing
            appears to infer sizes from the logits; confirm before removing.
        swiglu_limit: clamp bound passed to the swiglu activation.
        fused_act: run swiglu inside the first matmul's epilogue
            (requires interleaved gate/linear columns).
        interleaved: layout of w1's output columns for the unfused path.

    `record_function` ranges label each phase for the torch profiler.
    """
    if x.numel() == 0:
        # No tokens to process — skip all kernel launches.
        return x
    # Expert matmuls consume MX-scaled weights; the router matmul does not.
    pc1 = PrecisionConfig(weight_scale=w1_mx, flex_ctx=FlexCtx(rhs_data=InFlexData()))
    pc2 = PrecisionConfig(weight_scale=w2_mx, flex_ctx=FlexCtx(rhs_data=InFlexData()))
    pcg = PrecisionConfig(flex_ctx=FlexCtx(rhs_data=InFlexData()))
    with record_function("wg"):
        # Router logits: one score per expert per token.
        logits = matmul_ogs(x, wg, bg, precision_config=pcg)
    with record_function("routing"):
        rdata, gather_indx, scatter_indx = routing(logits, experts_per_token, simulated_ep=1)
    if fused_act:
        assert interleaved, "Fused activation requires interleaved weights"
        with record_function("w1+swiglu"):
            # swiglu runs in the matmul epilogue; (1.702, swiglu_limit) bind
            # its (alpha, limit) arguments.
            act = FusedActivation(FnSpecs("swiglu", triton_kernels.swiglu.swiglu_fn, ("alpha", "limit")), (1.702, swiglu_limit), 2)
            x = matmul_ogs(x, w1, b1, rdata, gather_indx=gather_indx, precision_config=pc1, fused_activation=act)
    else:
        with record_function("w1"):
            x = matmul_ogs(x, w1, b1, rdata, gather_indx=gather_indx, precision_config=pc1)
        with record_function("swiglu"):
            x = swiglu(x, limit=swiglu_limit, interleaved=interleaved)
    with record_function("w2"):
        # Scatter expert outputs back to token order, weighted by gate scores.
        x = matmul_ogs(x, w2, b2, rdata, scatter_indx=scatter_indx, precision_config=pc2, gammas=rdata.gate_scal)
    return x
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/triton/moe.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:gpt_oss/vllm/token_generator.py | from vllm import LLMEngine, EngineArgs, SamplingParams, TokensPrompt
class TokenGenerator:
    """Thin streaming wrapper around a vLLM LLMEngine."""

    def __init__(self, model_path: str, tensor_parallel_size: int = 1):
        args = EngineArgs(
            model=model_path,
            tensor_parallel_size=tensor_parallel_size,
        )
        self.engine = LLMEngine.from_engine_args(args)
        # Monotonic counter used to mint unique request ids.
        self.request_id = 0

    def generate(self,
                 prompt_tokens: list[int],
                 stop_tokens: list[int] | None = None,
                 temperature: float = 1.0,
                 max_tokens: int = 0,
                 return_logprobs: bool = False):
        """Yield generated token ids (or (id, logprob) pairs) as vLLM produces them.

        max_tokens == 0 means "no limit" and is mapped to None for
        SamplingParams. Stops after yielding a token found in stop_tokens.
        """
        if max_tokens == 0:
            max_tokens = None
        request_id = str(self.request_id)
        self.request_id += 1
        sampling_params = SamplingParams(temperature=temperature,
                                         max_tokens=max_tokens,
                                         stop_token_ids=stop_tokens,
                                         # logprobs=0 presumably requests only the
                                         # sampled token's logprob — confirm vLLM docs.
                                         logprobs=0 if return_logprobs else None)
        prompt = TokensPrompt(prompt_token_ids=prompt_tokens)
        self.engine.add_request(request_id, prompt, sampling_params)
        # engine.step() reports cumulative outputs for the request, so we
        # track what we've already yielded and slice off that prefix.
        last_token_id = []
        while self.engine.has_unfinished_requests():
            step_outputs = self.engine.step()
            output = step_outputs[0].outputs[0]
            token_ids = output.token_ids
            logprobs_list = output.logprobs if hasattr(output, "logprobs") else None
            new_token_ids = token_ids[len(last_token_id):]
            new_logprobs = logprobs_list[len(last_token_id):] if logprobs_list is not None else [None] * len(new_token_ids)
            for token_id, logprobs in zip(new_token_ids, new_logprobs):
                last_token_id.append(token_id)
                if return_logprobs:
                    logprob_val = None
                    if logprobs is not None and token_id in logprobs:
                        logprob_val = logprobs[token_id].logprob
                    yield (token_id, logprob_val)
                else:
                    yield token_id
                if stop_tokens is not None and token_id in stop_tokens:
                    break
| {
"repo_id": "openai/gpt-oss",
"file_path": "gpt_oss/vllm/token_generator.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/gpt-oss:tests/test_responses_api.py | import time
import pytest
from fastapi.testclient import TestClient
from openai_harmony import (
HarmonyEncodingName,
load_harmony_encoding,
)
from gpt_oss.responses_api.api_server import create_api_server
encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
# Canned token stream the stub "model" replays: one final-channel message.
fake_tokens = encoding.encode(
    "<|channel|>final<|message|>Hey there<|return|>", allowed_special="all"
)
# Working queue consumed by stub_infer_next_token; refilled once drained.
token_queue = fake_tokens.copy()
def stub_infer_next_token(
    tokens: list[int], temperature: float = 0.0, new_request: bool = False
) -> int:
    """Deterministic stand-in for model inference: replays the canned stream."""
    global token_queue
    token = token_queue.pop(0)
    if not token_queue:
        # Refill so the stub can serve an unbounded number of requests.
        token_queue = fake_tokens.copy()
    time.sleep(0.1)  # simulate a little inference latency
    return token
@pytest.fixture
def test_client():
    """TestClient wired to an API server backed by the stub inference function."""
    app = create_api_server(infer_next_token=stub_infer_next_token, encoding=encoding)
    return TestClient(app)
def test_health_check(test_client):
    """Smoke test: a minimal /v1/responses request should return HTTP 200."""
    payload = {
        "model": "gpt-oss-120b",
        "input": "Hello, world!",
    }
    response = test_client.post("/v1/responses", json=payload)
    print(response.json())
    assert response.status_code == 200
| {
"repo_id": "openai/gpt-oss",
"file_path": "tests/test_responses_api.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:examples/responses/websocket.py | from __future__ import annotations
import json
import argparse
from typing import TYPE_CHECKING, Dict, Union, Literal, Optional, TypedDict, NamedTuple, cast
from openai import OpenAI
from openai.types.responses import (
FunctionToolParam,
ToolChoiceOptions,
ResponseInputParam,
ResponseFailedEvent,
ResponseCompletedEvent,
ResponseInputItemParam,
ResponseIncompleteEvent,
ToolChoiceFunctionParam,
)
if TYPE_CHECKING:
from openai.resources.responses.responses import ResponsesConnection
# The demo's three callable tools.
ToolName = Literal["get_sku_inventory", "get_supplier_eta", "get_quality_alerts"]
# Either a routing option string or a forced function choice payload.
ToolChoice = Union[ToolChoiceOptions, ToolChoiceFunctionParam]
class DemoTurn(TypedDict):
    """One scripted demo turn: the tool to force and the prompt that drives it."""

    tool_name: ToolName
    prompt: str
class SKUArguments(TypedDict):
    """Parsed arguments for the demo tools (each takes just a SKU)."""

    sku: str
class SKUInventoryOutput(TypedDict):
    """Canned payload returned by the get_sku_inventory tool."""

    sku: str
    warehouse: str
    on_hand_units: int
    reserved_units: int
    reorder_point: int
    safety_stock: int
class SupplierShipment(TypedDict):
    """A single inbound shipment entry inside SupplierETAOutput."""

    shipment_id: str
    eta_date: str
    quantity: int
    risk: str
class SupplierETAOutput(TypedDict):
    """Canned payload returned by the get_supplier_eta tool."""

    sku: str
    supplier_shipments: list[SupplierShipment]
class QualityAlert(TypedDict):
    """A single alert entry inside QualityAlertsOutput."""

    alert_id: str
    status: str
    severity: str
    summary: str
class QualityAlertsOutput(TypedDict):
    """Canned payload returned by the get_quality_alerts tool."""

    sku: str
    alerts: list[QualityAlert]
class FunctionCallOutputItem(TypedDict):
    """Input item echoing a tool result back to the model."""

    type: Literal["function_call_output"]
    call_id: str
    output: str
class FunctionCallRequest(NamedTuple):
    """A function call the model requested during one response."""

    name: str
    arguments_json: str
    call_id: str
class RunResponseResult(NamedTuple):
    """Outcome of one response: text, terminal id, and requested tool calls."""

    text: str
    response_id: str
    function_calls: list[FunctionCallRequest]
class RunTurnResult(NamedTuple):
    """Outcome of a whole demo turn (possibly several chained responses)."""

    assistant_text: str
    response_id: str
# Union of the three payload shapes returned by call_tool().
ToolOutput = Union[SKUInventoryOutput, SupplierETAOutput, QualityAlertsOutput]
TOOLS: list[FunctionToolParam] = [
{
"type": "function",
"name": "get_sku_inventory",
"description": "Return froge pond inventory details for a SKU.",
"strict": True,
"parameters": {
"type": "object",
"properties": {
"sku": {
"type": "string",
"description": "Stock-keeping unit identifier, such as sku-froge-lily-pad-deluxe.",
}
},
"required": ["sku"],
"additionalProperties": False,
},
},
{
"type": "function",
"name": "get_supplier_eta",
"description": "Return tadpole supplier restock ETA data for a SKU.",
"strict": True,
"parameters": {
"type": "object",
"properties": {
"sku": {
"type": "string",
"description": "Stock-keeping unit identifier, such as sku-froge-lily-pad-deluxe.",
}
},
"required": ["sku"],
"additionalProperties": False,
},
},
{
"type": "function",
"name": "get_quality_alerts",
"description": "Return recent froge quality alerts for a SKU.",
"strict": True,
"parameters": {
"type": "object",
"properties": {
"sku": {
"type": "string",
"description": "Stock-keeping unit identifier, such as sku-froge-lily-pad-deluxe.",
}
},
"required": ["sku"],
"additionalProperties": False,
},
},
]
DEMO_TURNS: list[DemoTurn] = [
{
"tool_name": "get_sku_inventory",
"prompt": "Use get_sku_inventory for sku='sku-froge-lily-pad-deluxe' and summarize current pond stock health in one sentence.",
},
{
"tool_name": "get_supplier_eta",
"prompt": "Now use get_supplier_eta for the same SKU and summarize restock ETA and tadpole shipment risk.",
},
{
"tool_name": "get_quality_alerts",
"prompt": "Finally use get_quality_alerts for the same SKU and summarize unresolved froge quality concerns in one short paragraph.",
},
]
# Value for the `OpenAI-Beta` header that opts into websocket beta behavior.
BETA_HEADER_VALUE = "responses_websockets=2026-02-06"
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command line for the 3-turn WebSocket demo."""
    parser = argparse.ArgumentParser(
        description=("Run a 3-turn Responses WebSocket demo with function calling and chained previous_response_id.")
    )
    parser.add_argument("--model", default="gpt-5.2", help="Model used in the `response.create` payload.")
    # Boolean toggles, all defaulting to off; registration order matches
    # the help-text order users see.
    flag_specs = [
        ("--use-beta-header", f"Include `OpenAI-Beta: {BETA_HEADER_VALUE}` for beta websocket behavior."),
        ("--show-events", "Print non-text event types while streaming."),
        ("--show-tool-io", "Print each tool call and tool output payload."),
    ]
    for flag, help_text in flag_specs:
        parser.add_argument(flag, action="store_true", help=help_text)
    return parser.parse_args()
def parse_tool_name(name: str) -> ToolName:
    """Validate that `name` is one of the demo's three tools."""
    known_tools = {"get_sku_inventory", "get_supplier_eta", "get_quality_alerts"}
    if name in known_tools:
        return cast(ToolName, name)
    raise ValueError(f"Unsupported tool requested: {name}")
def parse_sku_arguments(raw_arguments: str) -> SKUArguments:
    """Decode a tool-call arguments JSON string into {"sku": str}.

    Raises ValueError when the payload is not a JSON object or has no
    string-valued `sku` key.
    """
    decoded = json.loads(raw_arguments)
    if not isinstance(decoded, dict):
        raise ValueError(f"Tool arguments must be a JSON object: {raw_arguments}")
    arguments = cast(Dict[str, object], decoded)
    sku = arguments.get("sku")
    if not isinstance(sku, str):
        raise ValueError(f"Tool arguments must include a string `sku`: {raw_arguments}")
    return {"sku": sku}
def call_tool(name: ToolName, arguments: SKUArguments) -> ToolOutput:
    """Return the canned demo payload for `name`, keyed to the requested SKU."""
    sku = arguments["sku"]

    def inventory() -> ToolOutput:
        return {
            "sku": sku,
            "warehouse": "pond-west-1",
            "on_hand_units": 84,
            "reserved_units": 26,
            "reorder_point": 60,
            "safety_stock": 40,
        }

    def supplier_eta() -> ToolOutput:
        return {
            "sku": sku,
            "supplier_shipments": [
                {
                    "shipment_id": "frog_ship_2201",
                    "eta_date": "2026-02-24",
                    "quantity": 180,
                    "risk": "low",
                },
                {
                    "shipment_id": "frog_ship_2205",
                    "eta_date": "2026-03-03",
                    "quantity": 220,
                    "risk": "medium",
                },
            ],
        }

    def quality_alerts() -> ToolOutput:
        return {
            "sku": sku,
            "alerts": [
                {
                    "alert_id": "frog_qa_781",
                    "status": "open",
                    "severity": "high",
                    "summary": "Lily-pad coating chipping in lot LP-42",
                },
                {
                    "alert_id": "frog_qa_795",
                    "status": "in_progress",
                    "severity": "medium",
                    "summary": "Pond-crate scuff rate above threshold",
                },
                {
                    "alert_id": "frog_qa_802",
                    "status": "resolved",
                    "severity": "low",
                    "summary": "Froge label alignment issue corrected",
                },
            ],
        }

    # Dispatch table instead of an if-chain; unknown names are rejected.
    builders = {
        "get_sku_inventory": inventory,
        "get_supplier_eta": supplier_eta,
        "get_quality_alerts": quality_alerts,
    }
    builder = builders.get(name)
    if builder is None:
        raise ValueError(f"Unknown tool: {name}")
    return builder()
def run_response(
    *,
    connection: ResponsesConnection,
    model: str,
    previous_response_id: Optional[str],
    input_payload: Union[str, ResponseInputParam],
    tools: list[FunctionToolParam],
    tool_choice: ToolChoice,
    show_events: bool,
) -> RunResponseResult:
    """Send one `response.create` over the socket and drain events to the end.

    Returns the concatenated output text, the terminal response id (needed by
    the caller for `previous_response_id` chaining), and any function calls
    the model requested. Raises RuntimeError on an error event, on a
    non-`completed` terminal state, or if the stream ends without a terminal
    event.
    """
    connection.response.create(
        model=model,
        input=input_payload,
        stream=True,
        previous_response_id=previous_response_id,
        tools=tools,
        tool_choice=tool_choice,
    )
    text_parts: list[str] = []
    function_calls: list[FunctionCallRequest] = []
    response_id: Optional[str] = None
    for event in connection:
        # Accumulate streamed text deltas.
        if event.type == "response.output_text.delta":
            text_parts.append(event.delta)
            continue
        # Completed function_call output items carry the tool request.
        if event.type == "response.output_item.done" and event.item.type == "function_call":
            function_calls.append(
                FunctionCallRequest(
                    name=event.item.name,
                    arguments_json=event.item.arguments,
                    call_id=event.item.call_id,
                )
            )
            continue
        if getattr(event, "type", None) == "error":
            raise RuntimeError(f"WebSocket error event: {event!r}")
        # Typed terminal events: completed / failed / incomplete.
        if isinstance(event, (ResponseCompletedEvent, ResponseFailedEvent, ResponseIncompleteEvent)):
            response_id = event.response.id
            if not isinstance(event, ResponseCompletedEvent):
                raise RuntimeError(f"Response ended with {event.type} (id={response_id})")
            if show_events:
                print(f"[{event.type}]")
            break
        if getattr(event, "type", None) == "response.done":
            # Responses over WebSocket currently emit `response.done` as the final event.
            # The payload still includes `response.id`, which we use for chaining.
            event_response = getattr(event, "response", None)
            event_response_id: Optional[str] = None
            if isinstance(event_response, dict):
                # Untyped payload: pull the id out of the raw dict.
                event_response_dict = cast(Dict[str, object], event_response)
                raw_event_response_id = event_response_dict.get("id")
                if isinstance(raw_event_response_id, str):
                    event_response_id = raw_event_response_id
            else:
                # Typed (or unknown) payload: fall back to attribute access.
                raw_event_response_id = getattr(event_response, "id", None)
                if isinstance(raw_event_response_id, str):
                    event_response_id = raw_event_response_id
            if not isinstance(event_response_id, str):
                raise RuntimeError(f"response.done event did not include a valid response.id: {event!r}")
            response_id = event_response_id
            if show_events:
                print("[response.done]")
            break
        # Any other event type: optionally log and keep draining.
        if show_events:
            print(f"[{event.type}]")
    if response_id is None:
        raise RuntimeError("No terminal response event received.")
    return RunResponseResult(
        text="".join(text_parts),
        response_id=response_id,
        function_calls=function_calls,
    )
def run_turn(
    *,
    connection: ResponsesConnection,
    model: str,
    previous_response_id: Optional[str],
    turn_prompt: str,
    forced_tool_name: ToolName,
    show_events: bool,
    show_tool_io: bool,
) -> RunTurnResult:
    """Run one demo turn: force a tool call, execute it locally, feed the
    output back, and repeat until the model replies with plain text.

    Returns the accumulated assistant text and the last response id so the
    caller can chain the next turn via previous_response_id.
    """
    accumulated_text_parts: list[str] = []
    # First request carries the user prompt and forces the named tool.
    current_input: Union[str, ResponseInputParam] = turn_prompt
    current_tool_choice: ToolChoice = {"type": "function", "name": forced_tool_name}
    current_previous_response_id = previous_response_id
    while True:
        response_result = run_response(
            connection=connection,
            model=model,
            previous_response_id=current_previous_response_id,
            input_payload=current_input,
            tools=TOOLS,
            tool_choice=current_tool_choice,
            show_events=show_events,
        )
        if response_result.text:
            accumulated_text_parts.append(response_result.text)
        # Chain subsequent requests off the response we just received.
        current_previous_response_id = response_result.response_id
        if not response_result.function_calls:
            # No more tool requests: the model answered in text; we're done.
            break
        # Execute each requested tool locally and package the outputs as
        # `function_call_output` input items for the follow-up request.
        tool_outputs: ResponseInputParam = []
        for function_call in response_result.function_calls:
            tool_name = parse_tool_name(function_call.name)
            arguments = parse_sku_arguments(function_call.arguments_json)
            output_payload = call_tool(tool_name, arguments)
            if show_tool_io:
                print(f"[tool_call] {function_call.name}({function_call.arguments_json})")
                print(f"[tool_output] {json.dumps(output_payload)}")
            function_call_output: FunctionCallOutputItem = {
                "type": "function_call_output",
                "call_id": function_call.call_id,
                "output": json.dumps(output_payload),
            }
            tool_outputs.append(cast(ResponseInputItemParam, function_call_output))
        current_input = tool_outputs
        # Don't force a tool on the follow-up: let the model answer in text.
        current_tool_choice = "none"
    return RunTurnResult(
        assistant_text="".join(accumulated_text_parts).strip(),
        response_id=current_previous_response_id,
    )
def main() -> None:
    """Open one WebSocket connection and run the three scripted demo turns,
    chaining `previous_response_id` from turn to turn."""
    args = parse_args()
    client = OpenAI()
    # The beta header is only attached when explicitly requested.
    extra_headers = {"OpenAI-Beta": BETA_HEADER_VALUE} if args.use_beta_header else {}
    with client.responses.connect(extra_headers=extra_headers) as connection:
        previous_response_id: Optional[str] = None
        for index, turn in enumerate(DEMO_TURNS, start=1):
            print(f"\n=== Turn {index} ===")
            print(f"User: {turn['prompt']}")
            turn_result = run_turn(
                connection=connection,
                model=args.model,
                previous_response_id=previous_response_id,
                turn_prompt=turn["prompt"],
                forced_tool_name=turn["tool_name"],
                show_events=args.show_events,
                show_tool_io=args.show_tool_io,
            )
            previous_response_id = turn_result.response_id
            print(f"Assistant: {turn_result.assistant_text}")


if __name__ == "__main__":
    main()
| {
"repo_id": "openai/openai-python",
"file_path": "examples/responses/websocket.py",
"license": "Apache License 2.0",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/responses/response_conversation_param_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
__all__ = ["ResponseConversationParamParam"]
class ResponseConversationParamParam(TypedDict, total=False):
"""The conversation that this response belongs to."""
id: Required[str]
"""The unique ID of the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_conversation_param_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_input.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from typing_extensions import TypeAlias
from .response_input_item import ResponseInputItem
__all__ = ["ResponseInput"]
ResponseInput: TypeAlias = List[ResponseInputItem]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/responses_client_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias
from .tool import Tool
from ..._models import BaseModel
from .response_input import ResponseInput
from .response_prompt import ResponsePrompt
from .tool_choice_mcp import ToolChoiceMcp
from ..shared.metadata import Metadata
from ..shared.reasoning import Reasoning
from .tool_choice_shell import ToolChoiceShell
from .tool_choice_types import ToolChoiceTypes
from .tool_choice_custom import ToolChoiceCustom
from .response_includable import ResponseIncludable
from .tool_choice_allowed import ToolChoiceAllowed
from .tool_choice_options import ToolChoiceOptions
from .response_text_config import ResponseTextConfig
from .tool_choice_function import ToolChoiceFunction
from ..shared.responses_model import ResponsesModel
from .tool_choice_apply_patch import ToolChoiceApplyPatch
from .response_conversation_param import ResponseConversationParam
__all__ = ["ResponsesClientEvent", "ContextManagement", "Conversation", "StreamOptions", "ToolChoice"]
class ContextManagement(BaseModel):
type: str
"""The context management entry type. Currently only 'compaction' is supported."""
compact_threshold: Optional[int] = None
"""Token threshold at which compaction should be triggered for this entry."""
Conversation: TypeAlias = Union[str, ResponseConversationParam, None]
class StreamOptions(BaseModel):
"""Options for streaming responses. Only set this when you set `stream: true`."""
include_obfuscation: Optional[bool] = None
"""When true, stream obfuscation will be enabled.
Stream obfuscation adds random characters to an `obfuscation` field on streaming
delta events to normalize payload sizes as a mitigation to certain side-channel
attacks. These obfuscation fields are included by default, but add a small
amount of overhead to the data stream. You can set `include_obfuscation` to
false to optimize for bandwidth if you trust the network links between your
application and the OpenAI API.
"""
ToolChoice: TypeAlias = Union[
ToolChoiceOptions,
ToolChoiceAllowed,
ToolChoiceTypes,
ToolChoiceFunction,
ToolChoiceMcp,
ToolChoiceCustom,
ToolChoiceApplyPatch,
ToolChoiceShell,
]
class ResponsesClientEvent(BaseModel):
type: Literal["response.create"]
"""The type of the client event. Always `response.create`."""
background: Optional[bool] = None
"""
Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
"""
context_management: Optional[List[ContextManagement]] = None
"""Context management configuration for this request."""
conversation: Optional[Conversation] = None
"""The conversation that this response belongs to.
Items from this conversation are prepended to `input_items` for this response
request. Input items and output items from this response are automatically added
to this conversation after this response completes.
"""
include: Optional[List[ResponseIncludable]] = None
"""Specify additional output data to include in the model response.
Currently supported values are:
- `web_search_call.action.sources`: Include the sources of the web search tool
call.
- `code_interpreter_call.outputs`: Includes the outputs of python code execution
in code interpreter tool call items.
- `computer_call_output.output.image_url`: Include image urls from the computer
call output.
- `file_search_call.results`: Include the search results of the file search tool
call.
- `message.input_image.image_url`: Include image urls from the input message.
- `message.output_text.logprobs`: Include logprobs with assistant messages.
- `reasoning.encrypted_content`: Includes an encrypted version of reasoning
tokens in reasoning item outputs. This enables reasoning items to be used in
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
"""
input: Union[str, ResponseInput, None] = None
"""Text, image, or file inputs to the model, used to generate a response.
Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Image inputs](https://platform.openai.com/docs/guides/images)
- [File inputs](https://platform.openai.com/docs/guides/pdf-files)
- [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
- [Function calling](https://platform.openai.com/docs/guides/function-calling)
"""
instructions: Optional[str] = None
"""A system (or developer) message inserted into the model's context.
When using along with `previous_response_id`, the instructions from a previous
response will not be carried over to the next response. This makes it simple to
swap out system (or developer) messages in new responses.
"""
max_output_tokens: Optional[int] = None
"""
An upper bound for the number of tokens that can be generated for a response,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
"""
max_tool_calls: Optional[int] = None
"""
The maximum number of total calls to built-in tools that can be processed in a
response. This maximum number applies across all built-in tool calls, not per
individual tool. Any further attempts to call a tool by the model will be
ignored.
"""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
model: Optional[ResponsesModel] = None
"""Model ID used to generate the response, like `gpt-4o` or `o3`.
OpenAI offers a wide range of models with different capabilities, performance
characteristics, and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
"""
parallel_tool_calls: Optional[bool] = None
"""Whether to allow the model to run tool calls in parallel."""
previous_response_id: Optional[str] = None
"""The unique ID of the previous response to the model.
Use this to create multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
"""
prompt: Optional[ResponsePrompt] = None
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
prompt_cache_key: Optional[str] = None
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
"""
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None
"""The retention policy for the prompt cache.
Set to `24h` to enable extended prompt caching, which keeps cached prefixes
active for longer, up to a maximum of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
"""
reasoning: Optional[Reasoning] = None
"""**gpt-5 and o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
"""
safety_identifier: Optional[str] = None
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user, with a maximum length of 64 characters. We recommend
hashing their username or email address, in order to avoid sending us any
identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
"""Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
"""
store: Optional[bool] = None
"""Whether to store the generated model response for later retrieval via API."""
stream: Optional[bool] = None
"""
If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
"""
stream_options: Optional[StreamOptions] = None
"""Options for streaming responses. Only set this when you set `stream: true`."""
temperature: Optional[float] = None
"""What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic. We generally recommend altering
this or `top_p` but not both.
"""
text: Optional[ResponseTextConfig] = None
"""Configuration options for a text response from the model.
Can be plain text or structured JSON data. Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
"""
tool_choice: Optional[ToolChoice] = None
"""
How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
"""
tools: Optional[List[Tool]] = None
"""An array of tools the model may call while generating a response.
You can specify which tool to use by setting the `tool_choice` parameter.
We support the following categories of tools:
- **Built-in tools**: Tools that are provided by OpenAI that extend the model's
capabilities, like
[web search](https://platform.openai.com/docs/guides/tools-web-search) or
[file search](https://platform.openai.com/docs/guides/tools-file-search).
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
Learn more about
[function calling](https://platform.openai.com/docs/guides/function-calling).
You can also use custom tools to call your own code.
"""
top_logprobs: Optional[int] = None
"""
An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
"""
top_p: Optional[float] = None
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
"""
truncation: Optional[Literal["auto", "disabled"]] = None
"""The truncation strategy to use for the model response.
- `auto`: If the input to this Response exceeds the model's context window size,
the model will truncate the response to fit the context window by dropping
items from the beginning of the conversation.
- `disabled` (default): If the input size will exceed the context window size
for a model, the request will fail with a 400 error.
"""
user: Optional[str] = None
"""This field is being replaced by `safety_identifier` and `prompt_cache_key`.
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/responses_client_event.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/responses_client_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .tool_param import ToolParam
from .response_includable import ResponseIncludable
from .tool_choice_options import ToolChoiceOptions
from .response_input_param import ResponseInputParam
from .response_prompt_param import ResponsePromptParam
from .tool_choice_mcp_param import ToolChoiceMcpParam
from ..shared_params.metadata import Metadata
from .tool_choice_shell_param import ToolChoiceShellParam
from .tool_choice_types_param import ToolChoiceTypesParam
from ..shared_params.reasoning import Reasoning
from .tool_choice_custom_param import ToolChoiceCustomParam
from .tool_choice_allowed_param import ToolChoiceAllowedParam
from .response_text_config_param import ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam
from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam
from ..shared_params.responses_model import ResponsesModel
from .response_conversation_param_param import ResponseConversationParamParam
__all__ = ["ResponsesClientEventParam", "ContextManagement", "Conversation", "StreamOptions", "ToolChoice"]
class ContextManagement(TypedDict, total=False):
type: Required[str]
"""The context management entry type. Currently only 'compaction' is supported."""
compact_threshold: Optional[int]
"""Token threshold at which compaction should be triggered for this entry."""
Conversation: TypeAlias = Union[str, ResponseConversationParamParam]
class StreamOptions(TypedDict, total=False):
"""Options for streaming responses. Only set this when you set `stream: true`."""
include_obfuscation: bool
"""When true, stream obfuscation will be enabled.
Stream obfuscation adds random characters to an `obfuscation` field on streaming
delta events to normalize payload sizes as a mitigation to certain side-channel
attacks. These obfuscation fields are included by default, but add a small
amount of overhead to the data stream. You can set `include_obfuscation` to
false to optimize for bandwidth if you trust the network links between your
application and the OpenAI API.
"""
ToolChoice: TypeAlias = Union[
ToolChoiceOptions,
ToolChoiceAllowedParam,
ToolChoiceTypesParam,
ToolChoiceFunctionParam,
ToolChoiceMcpParam,
ToolChoiceCustomParam,
ToolChoiceApplyPatchParam,
ToolChoiceShellParam,
]
class ResponsesClientEventParam(TypedDict, total=False):
type: Required[Literal["response.create"]]
"""The type of the client event. Always `response.create`."""
background: Optional[bool]
"""
Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
"""
context_management: Optional[Iterable[ContextManagement]]
"""Context management configuration for this request."""
conversation: Optional[Conversation]
"""The conversation that this response belongs to.
Items from this conversation are prepended to `input_items` for this response
request. Input items and output items from this response are automatically added
to this conversation after this response completes.
"""
include: Optional[List[ResponseIncludable]]
"""Specify additional output data to include in the model response.
Currently supported values are:
- `web_search_call.action.sources`: Include the sources of the web search tool
call.
- `code_interpreter_call.outputs`: Includes the outputs of python code execution
in code interpreter tool call items.
- `computer_call_output.output.image_url`: Include image urls from the computer
call output.
- `file_search_call.results`: Include the search results of the file search tool
call.
- `message.input_image.image_url`: Include image urls from the input message.
- `message.output_text.logprobs`: Include logprobs with assistant messages.
- `reasoning.encrypted_content`: Includes an encrypted version of reasoning
tokens in reasoning item outputs. This enables reasoning items to be used in
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
"""
input: Union[str, ResponseInputParam]
"""Text, image, or file inputs to the model, used to generate a response.
Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Image inputs](https://platform.openai.com/docs/guides/images)
- [File inputs](https://platform.openai.com/docs/guides/pdf-files)
- [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
- [Function calling](https://platform.openai.com/docs/guides/function-calling)
"""
instructions: Optional[str]
"""A system (or developer) message inserted into the model's context.
When using along with `previous_response_id`, the instructions from a previous
response will not be carried over to the next response. This makes it simple to
swap out system (or developer) messages in new responses.
"""
max_output_tokens: Optional[int]
"""
An upper bound for the number of tokens that can be generated for a response,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
"""
max_tool_calls: Optional[int]
"""
The maximum number of total calls to built-in tools that can be processed in a
response. This maximum number applies across all built-in tool calls, not per
individual tool. Any further attempts to call a tool by the model will be
ignored.
"""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
model: ResponsesModel
"""Model ID used to generate the response, like `gpt-4o` or `o3`.
OpenAI offers a wide range of models with different capabilities, performance
characteristics, and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
"""
parallel_tool_calls: Optional[bool]
"""Whether to allow the model to run tool calls in parallel."""
previous_response_id: Optional[str]
"""The unique ID of the previous response to the model.
Use this to create multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
"""
prompt: Optional[ResponsePromptParam]
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
prompt_cache_key: str
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
"""
prompt_cache_retention: Optional[Literal["in-memory", "24h"]]
"""The retention policy for the prompt cache.
Set to `24h` to enable extended prompt caching, which keeps cached prefixes
active for longer, up to a maximum of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
"""
reasoning: Optional[Reasoning]
"""**gpt-5 and o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
"""
safety_identifier: str
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user, with a maximum length of 64 characters. We recommend
hashing their username or email address, in order to avoid sending us any
identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]]
"""Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
"""
store: Optional[bool]
"""Whether to store the generated model response for later retrieval via API."""
stream: Optional[bool]
"""
If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
for more information.
"""
stream_options: Optional[StreamOptions]
"""Options for streaming responses. Only set this when you set `stream: true`."""
temperature: Optional[float]
"""What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic. We generally recommend altering
this or `top_p` but not both.
"""
text: ResponseTextConfigParam
"""Configuration options for a text response from the model.
Can be plain text or structured JSON data. Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
"""
tool_choice: ToolChoice
"""
How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
"""
tools: Iterable[ToolParam]
"""An array of tools the model may call while generating a response.
You can specify which tool to use by setting the `tool_choice` parameter.
We support the following categories of tools:
- **Built-in tools**: Tools that are provided by OpenAI that extend the model's
capabilities, like
[web search](https://platform.openai.com/docs/guides/tools-web-search) or
[file search](https://platform.openai.com/docs/guides/tools-file-search).
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
Learn more about
[function calling](https://platform.openai.com/docs/guides/function-calling).
You can also use custom tools to call your own code.
"""
top_logprobs: Optional[int]
"""
An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
"""
top_p: Optional[float]
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
"""
truncation: Optional[Literal["auto", "disabled"]]
"""The truncation strategy to use for the model response.
- `auto`: If the input to this Response exceeds the model's context window size,
the model will truncate the response to fit the context window by dropping
items from the beginning of the conversation.
- `disabled` (default): If the input size will exceed the context window size
for a model, the request will fail with a 400 error.
"""
user: str
"""This field is being replaced by `safety_identifier` and `prompt_cache_key`.
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/responses_client_event_param.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/responses_server_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .response_error_event import ResponseErrorEvent
from .response_failed_event import ResponseFailedEvent
from .response_queued_event import ResponseQueuedEvent
from .response_created_event import ResponseCreatedEvent
from .response_completed_event import ResponseCompletedEvent
from .response_text_done_event import ResponseTextDoneEvent
from .response_audio_done_event import ResponseAudioDoneEvent
from .response_incomplete_event import ResponseIncompleteEvent
from .response_text_delta_event import ResponseTextDeltaEvent
from .response_audio_delta_event import ResponseAudioDeltaEvent
from .response_in_progress_event import ResponseInProgressEvent
from .response_refusal_done_event import ResponseRefusalDoneEvent
from .response_refusal_delta_event import ResponseRefusalDeltaEvent
from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent
from .response_output_item_done_event import ResponseOutputItemDoneEvent
from .response_content_part_done_event import ResponseContentPartDoneEvent
from .response_output_item_added_event import ResponseOutputItemAddedEvent
from .response_content_part_added_event import ResponseContentPartAddedEvent
from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent
from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent
from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent
from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent
from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent
from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent
from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent
from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent
from .response_mcp_list_tools_completed_event import ResponseMcpListToolsCompletedEvent
from .response_image_gen_call_generating_event import ResponseImageGenCallGeneratingEvent
from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent
from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent
from .response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent
from .response_custom_tool_call_input_done_event import ResponseCustomToolCallInputDoneEvent
from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent
from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent
from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
from .response_custom_tool_call_input_delta_event import ResponseCustomToolCallInputDeltaEvent
from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent
from .response_output_text_annotation_added_event import ResponseOutputTextAnnotationAddedEvent
from .response_reasoning_summary_part_added_event import ResponseReasoningSummaryPartAddedEvent
from .response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent
from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
from .response_code_interpreter_call_code_delta_event import ResponseCodeInterpreterCallCodeDeltaEvent
from .response_code_interpreter_call_in_progress_event import ResponseCodeInterpreterCallInProgressEvent
from .response_code_interpreter_call_interpreting_event import ResponseCodeInterpreterCallInterpretingEvent
__all__ = ["ResponsesServerEvent"]
ResponsesServerEvent: TypeAlias = Annotated[
Union[
ResponseAudioDeltaEvent,
ResponseAudioDoneEvent,
ResponseAudioTranscriptDeltaEvent,
ResponseAudioTranscriptDoneEvent,
ResponseCodeInterpreterCallCodeDeltaEvent,
ResponseCodeInterpreterCallCodeDoneEvent,
ResponseCodeInterpreterCallCompletedEvent,
ResponseCodeInterpreterCallInProgressEvent,
ResponseCodeInterpreterCallInterpretingEvent,
ResponseCompletedEvent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseCreatedEvent,
ResponseErrorEvent,
ResponseFileSearchCallCompletedEvent,
ResponseFileSearchCallInProgressEvent,
ResponseFileSearchCallSearchingEvent,
ResponseFunctionCallArgumentsDeltaEvent,
ResponseFunctionCallArgumentsDoneEvent,
ResponseInProgressEvent,
ResponseFailedEvent,
ResponseIncompleteEvent,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponseReasoningSummaryPartAddedEvent,
ResponseReasoningSummaryPartDoneEvent,
ResponseReasoningSummaryTextDeltaEvent,
ResponseReasoningSummaryTextDoneEvent,
ResponseReasoningTextDeltaEvent,
ResponseReasoningTextDoneEvent,
ResponseRefusalDeltaEvent,
ResponseRefusalDoneEvent,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
ResponseWebSearchCallCompletedEvent,
ResponseWebSearchCallInProgressEvent,
ResponseWebSearchCallSearchingEvent,
ResponseImageGenCallCompletedEvent,
ResponseImageGenCallGeneratingEvent,
ResponseImageGenCallInProgressEvent,
ResponseImageGenCallPartialImageEvent,
ResponseMcpCallArgumentsDeltaEvent,
ResponseMcpCallArgumentsDoneEvent,
ResponseMcpCallCompletedEvent,
ResponseMcpCallFailedEvent,
ResponseMcpCallInProgressEvent,
ResponseMcpListToolsCompletedEvent,
ResponseMcpListToolsFailedEvent,
ResponseMcpListToolsInProgressEvent,
ResponseOutputTextAnnotationAddedEvent,
ResponseQueuedEvent,
ResponseCustomToolCallInputDeltaEvent,
ResponseCustomToolCallInputDoneEvent,
],
PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/responses_server_event.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/skills/content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from ... import _legacy_response
from ..._types import Body, Query, Headers, NotGiven, not_given
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..._base_client import make_request_options
__all__ = ["Content", "AsyncContent"]
class Content(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ContentWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ContentWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ContentWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ContentWithStreamingResponse(self)
def retrieve(
self,
skill_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Download a skill zip bundle by its ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return self._get(
f"/skills/{skill_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
class AsyncContent(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncContentWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncContentWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncContentWithStreamingResponse(self)
async def retrieve(
self,
skill_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Download a skill zip bundle by its ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/skills/{skill_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
class ContentWithRawResponse:
def __init__(self, content: Content) -> None:
self._content = content
self.retrieve = _legacy_response.to_raw_response_wrapper(
content.retrieve,
)
class AsyncContentWithRawResponse:
def __init__(self, content: AsyncContent) -> None:
self._content = content
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
content.retrieve,
)
class ContentWithStreamingResponse:
def __init__(self, content: Content) -> None:
self._content = content
self.retrieve = to_custom_streamed_response_wrapper(
content.retrieve,
StreamedBinaryAPIResponse,
)
class AsyncContentWithStreamingResponse:
def __init__(self, content: AsyncContent) -> None:
self._content = content
self.retrieve = async_to_custom_streamed_response_wrapper(
content.retrieve,
AsyncStreamedBinaryAPIResponse,
)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/skills/content.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/skills/skills.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Mapping, cast
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ...types import skill_list_params, skill_create_params, skill_update_params
from .content import (
Content,
AsyncContent,
ContentWithRawResponse,
AsyncContentWithRawResponse,
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
from ..._types import (
Body,
Omit,
Query,
Headers,
NotGiven,
FileTypes,
SequenceNotStr,
omit,
not_given,
)
from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncCursorPage, AsyncCursorPage
from ...types.skill import Skill
from ..._base_client import AsyncPaginator, make_request_options
from .versions.versions import (
Versions,
AsyncVersions,
VersionsWithRawResponse,
AsyncVersionsWithRawResponse,
VersionsWithStreamingResponse,
AsyncVersionsWithStreamingResponse,
)
from ...types.deleted_skill import DeletedSkill
__all__ = ["Skills", "AsyncSkills"]
class Skills(SyncAPIResource):
@cached_property
def content(self) -> Content:
return Content(self._client)
@cached_property
def versions(self) -> Versions:
return Versions(self._client)
@cached_property
def with_raw_response(self) -> SkillsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return SkillsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> SkillsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return SkillsWithStreamingResponse(self)
def create(
self,
*,
files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Skill:
"""
Create a new skill.
Args:
files: Skill files to upload (directory upload) or a single zip file.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal({"files": files})
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"], ["files"]])
if extracted_files:
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/skills",
body=maybe_transform(body, skill_create_params.SkillCreateParams),
files=extracted_files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Skill,
)
def retrieve(
self,
skill_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Skill:
"""
Get a skill by its ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
return self._get(
f"/skills/{skill_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Skill,
)
def update(
self,
skill_id: str,
*,
default_version: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Skill:
"""
Update the default version pointer for a skill.
Args:
default_version: The skill version number to set as default.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
return self._post(
f"/skills/{skill_id}",
body=maybe_transform({"default_version": default_version}, skill_update_params.SkillUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Skill,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[Skill]:
"""
List all skills for the current project.
Args:
after: Identifier for the last item from the previous pagination request
limit: Number of items to retrieve
order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/skills",
page=SyncCursorPage[Skill],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
skill_list_params.SkillListParams,
),
),
model=Skill,
)
def delete(
self,
skill_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedSkill:
"""
Delete a skill by its ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
return self._delete(
f"/skills/{skill_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=DeletedSkill,
)
class AsyncSkills(AsyncAPIResource):
    """Async resource for the `/skills` API: create, retrieve, update, list and delete skills."""
    @cached_property
    def content(self) -> AsyncContent:
        # Sub-resource for downloading skill content bundles.
        return AsyncContent(self._client)
    @cached_property
    def versions(self) -> AsyncVersions:
        # Sub-resource for managing immutable skill versions.
        return AsyncVersions(self._client)
    @cached_property
    def with_raw_response(self) -> AsyncSkillsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSkillsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncSkillsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSkillsWithStreamingResponse(self)
    async def create(
        self,
        *,
        files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Skill:
        """
        Create a new skill.

        Args:
          files: Skill files to upload (directory upload) or a single zip file.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal({"files": files})
        # Pull file objects out of the body so they are sent as multipart parts.
        extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"], ["files"]])
        if extracted_files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            "/skills",
            body=await async_maybe_transform(body, skill_create_params.SkillCreateParams),
            files=extracted_files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Skill,
        )
    async def retrieve(
        self,
        skill_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Skill:
        """
        Get a skill by its ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        return await self._get(
            f"/skills/{skill_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Skill,
        )
    async def update(
        self,
        skill_id: str,
        *,
        default_version: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Skill:
        """
        Update the default version pointer for a skill.

        Args:
          default_version: The skill version number to set as default.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        return await self._post(
            f"/skills/{skill_id}",
            body=await async_maybe_transform(
                {"default_version": default_version}, skill_update_params.SkillUpdateParams
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Skill,
        )
    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Skill, AsyncCursorPage[Skill]]:
        """
        List all skills for the current project.

        Args:
          after: Identifier for the last item from the previous pagination request

          limit: Number of items to retrieve

          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
              descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Not `async def`: returns a lazy paginator that is awaited/iterated by the caller.
        return self._get_api_list(
            "/skills",
            page=AsyncCursorPage[Skill],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    skill_list_params.SkillListParams,
                ),
            ),
            model=Skill,
        )
    async def delete(
        self,
        skill_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> DeletedSkill:
        """
        Delete a skill by its ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        return await self._delete(
            f"/skills/{skill_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DeletedSkill,
        )
class SkillsWithRawResponse:
    """View over :class:`Skills` whose methods return the raw HTTP response."""

    def __init__(self, skills: Skills) -> None:
        self._skills = skills
        # Wrap each API method so it yields the raw response instead of parsed content.
        self.create = _legacy_response.to_raw_response_wrapper(skills.create)
        self.retrieve = _legacy_response.to_raw_response_wrapper(skills.retrieve)
        self.update = _legacy_response.to_raw_response_wrapper(skills.update)
        self.list = _legacy_response.to_raw_response_wrapper(skills.list)
        self.delete = _legacy_response.to_raw_response_wrapper(skills.delete)

    @cached_property
    def content(self) -> ContentWithRawResponse:
        return ContentWithRawResponse(self._skills.content)

    @cached_property
    def versions(self) -> VersionsWithRawResponse:
        return VersionsWithRawResponse(self._skills.versions)
class AsyncSkillsWithRawResponse:
    """View over :class:`AsyncSkills` whose methods return the raw HTTP response."""

    def __init__(self, skills: AsyncSkills) -> None:
        self._skills = skills
        # Wrap each API method so it yields the raw response instead of parsed content.
        self.create = _legacy_response.async_to_raw_response_wrapper(skills.create)
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(skills.retrieve)
        self.update = _legacy_response.async_to_raw_response_wrapper(skills.update)
        self.list = _legacy_response.async_to_raw_response_wrapper(skills.list)
        self.delete = _legacy_response.async_to_raw_response_wrapper(skills.delete)

    @cached_property
    def content(self) -> AsyncContentWithRawResponse:
        return AsyncContentWithRawResponse(self._skills.content)

    @cached_property
    def versions(self) -> AsyncVersionsWithRawResponse:
        return AsyncVersionsWithRawResponse(self._skills.versions)
class SkillsWithStreamingResponse:
    """View over :class:`Skills` whose methods stream the response body lazily."""

    def __init__(self, skills: Skills) -> None:
        self._skills = skills
        # Wrap each API method so the response body is not read eagerly.
        self.create = to_streamed_response_wrapper(skills.create)
        self.retrieve = to_streamed_response_wrapper(skills.retrieve)
        self.update = to_streamed_response_wrapper(skills.update)
        self.list = to_streamed_response_wrapper(skills.list)
        self.delete = to_streamed_response_wrapper(skills.delete)

    @cached_property
    def content(self) -> ContentWithStreamingResponse:
        return ContentWithStreamingResponse(self._skills.content)

    @cached_property
    def versions(self) -> VersionsWithStreamingResponse:
        return VersionsWithStreamingResponse(self._skills.versions)
class AsyncSkillsWithStreamingResponse:
    """View over :class:`AsyncSkills` whose methods stream the response body lazily."""

    def __init__(self, skills: AsyncSkills) -> None:
        self._skills = skills
        # Wrap each API method so the response body is not read eagerly.
        self.create = async_to_streamed_response_wrapper(skills.create)
        self.retrieve = async_to_streamed_response_wrapper(skills.retrieve)
        self.update = async_to_streamed_response_wrapper(skills.update)
        self.list = async_to_streamed_response_wrapper(skills.list)
        self.delete = async_to_streamed_response_wrapper(skills.delete)

    @cached_property
    def content(self) -> AsyncContentWithStreamingResponse:
        return AsyncContentWithStreamingResponse(self._skills.content)

    @cached_property
    def versions(self) -> AsyncVersionsWithStreamingResponse:
        return AsyncVersionsWithStreamingResponse(self._skills.versions)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/skills/skills.py",
"license": "Apache License 2.0",
"lines": 519,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/skills/versions/content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ...._base_client import make_request_options
__all__ = ["Content", "AsyncContent"]
class Content(SyncAPIResource):
    """Sync resource for downloading skill version content bundles."""

    @cached_property
    def with_raw_response(self) -> ContentWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ContentWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ContentWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ContentWithStreamingResponse(self)

    def retrieve(
        self,
        version: str,
        *,
        skill_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Download a skill version zip bundle.

        Args:
          version: The skill version number.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Validate both path parameters (skill_id first, matching the URL order).
        for param_name, param_value in (("skill_id", skill_id), ("version", version)):
            if not param_value:
                raise ValueError(f"Expected a non-empty value for `{param_name}` but received {param_value!r}")
        # The endpoint returns a binary zip payload.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(
            f"/skills/{skill_id}/versions/{version}/content",
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class AsyncContent(AsyncAPIResource):
    """Async resource for downloading skill version content bundles."""

    @cached_property
    def with_raw_response(self) -> AsyncContentWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncContentWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncContentWithStreamingResponse(self)

    async def retrieve(
        self,
        version: str,
        *,
        skill_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Download a skill version zip bundle.

        Args:
          version: The skill version number.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Validate both path parameters (skill_id first, matching the URL order).
        for param_name, param_value in (("skill_id", skill_id), ("version", version)):
            if not param_value:
                raise ValueError(f"Expected a non-empty value for `{param_name}` but received {param_value!r}")
        # The endpoint returns a binary zip payload.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(
            f"/skills/{skill_id}/versions/{version}/content",
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class ContentWithRawResponse:
    """View over :class:`Content` whose methods return the raw HTTP response."""

    def __init__(self, content: Content) -> None:
        self._content = content
        # Wrap `retrieve` so it yields the raw response instead of parsed content.
        self.retrieve = _legacy_response.to_raw_response_wrapper(content.retrieve)
class AsyncContentWithRawResponse:
    """View over :class:`AsyncContent` whose methods return the raw HTTP response."""

    def __init__(self, content: AsyncContent) -> None:
        self._content = content
        # Wrap `retrieve` so it yields the raw response instead of parsed content.
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(content.retrieve)
class ContentWithStreamingResponse:
    """View over :class:`Content` that streams the binary response body lazily."""

    def __init__(self, content: Content) -> None:
        self._content = content
        # Binary endpoint: use the custom wrapper with a streamed-binary response type.
        self.retrieve = to_custom_streamed_response_wrapper(content.retrieve, StreamedBinaryAPIResponse)
class AsyncContentWithStreamingResponse:
    """View over :class:`AsyncContent` that streams the binary response body lazily."""

    def __init__(self, content: AsyncContent) -> None:
        self._content = content
        # Binary endpoint: use the custom wrapper with a streamed-binary response type.
        self.retrieve = async_to_custom_streamed_response_wrapper(content.retrieve, AsyncStreamedBinaryAPIResponse)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/skills/versions/content.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/skills/versions/versions.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Mapping, cast
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from .content import (
Content,
AsyncContent,
ContentWithRawResponse,
AsyncContentWithRawResponse,
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
from ...._types import (
Body,
Omit,
Query,
Headers,
NotGiven,
FileTypes,
SequenceNotStr,
omit,
not_given,
)
from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.skills import version_list_params, version_create_params
from ....types.skills.skill_version import SkillVersion
from ....types.skills.deleted_skill_version import DeletedSkillVersion
__all__ = ["Versions", "AsyncVersions"]
class Versions(SyncAPIResource):
    """Sync resource for the `/skills/{skill_id}/versions` API: immutable skill versions."""
    @cached_property
    def content(self) -> Content:
        # Sub-resource for downloading a version's zip bundle.
        return Content(self._client)
    @cached_property
    def with_raw_response(self) -> VersionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return VersionsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> VersionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return VersionsWithStreamingResponse(self)
    def create(
        self,
        skill_id: str,
        *,
        default: bool | Omit = omit,
        files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillVersion:
        """
        Create a new immutable skill version.

        Args:
          default: Whether to set this version as the default.

          files: Skill files to upload (directory upload) or a single zip file.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        body = deepcopy_minimal(
            {
                "default": default,
                "files": files,
            }
        )
        # Pull file objects out of the body so they are sent as multipart parts.
        extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"], ["files"]])
        if extracted_files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            f"/skills/{skill_id}/versions",
            body=maybe_transform(body, version_create_params.VersionCreateParams),
            files=extracted_files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillVersion,
        )
    def retrieve(
        self,
        version: str,
        *,
        skill_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillVersion:
        """
        Get a specific skill version.

        Args:
          version: The version number to retrieve.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` or `version` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        if not version:
            raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
        return self._get(
            f"/skills/{skill_id}/versions/{version}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillVersion,
        )
    def list(
        self,
        skill_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[SkillVersion]:
        """
        List skill versions for a skill.

        Args:
          after: The skill version ID to start after.

          limit: Number of versions to retrieve.

          order: Sort order of results by version number.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        return self._get_api_list(
            f"/skills/{skill_id}/versions",
            page=SyncCursorPage[SkillVersion],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    version_list_params.VersionListParams,
                ),
            ),
            model=SkillVersion,
        )
    def delete(
        self,
        version: str,
        *,
        skill_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> DeletedSkillVersion:
        """
        Delete a skill version.

        Args:
          version: The skill version number.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` or `version` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        if not version:
            raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
        return self._delete(
            f"/skills/{skill_id}/versions/{version}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DeletedSkillVersion,
        )
class AsyncVersions(AsyncAPIResource):
    """Async resource for the `/skills/{skill_id}/versions` API: immutable skill versions."""
    @cached_property
    def content(self) -> AsyncContent:
        # Sub-resource for downloading a version's zip bundle.
        return AsyncContent(self._client)
    @cached_property
    def with_raw_response(self) -> AsyncVersionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncVersionsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncVersionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncVersionsWithStreamingResponse(self)
    async def create(
        self,
        skill_id: str,
        *,
        default: bool | Omit = omit,
        files: Union[SequenceNotStr[FileTypes], FileTypes] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillVersion:
        """
        Create a new immutable skill version.

        Args:
          default: Whether to set this version as the default.

          files: Skill files to upload (directory upload) or a single zip file.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        body = deepcopy_minimal(
            {
                "default": default,
                "files": files,
            }
        )
        # Pull file objects out of the body so they are sent as multipart parts.
        extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"], ["files"]])
        if extracted_files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            f"/skills/{skill_id}/versions",
            body=await async_maybe_transform(body, version_create_params.VersionCreateParams),
            files=extracted_files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillVersion,
        )
    async def retrieve(
        self,
        version: str,
        *,
        skill_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SkillVersion:
        """
        Get a specific skill version.

        Args:
          version: The version number to retrieve.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` or `version` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        if not version:
            raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
        return await self._get(
            f"/skills/{skill_id}/versions/{version}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SkillVersion,
        )
    def list(
        self,
        skill_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[SkillVersion, AsyncCursorPage[SkillVersion]]:
        """
        List skill versions for a skill.

        Args:
          after: The skill version ID to start after.

          limit: Number of versions to retrieve.

          order: Sort order of results by version number.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` is empty.
        """
        # Not `async def`: returns a lazy paginator that is awaited/iterated by the caller.
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        return self._get_api_list(
            f"/skills/{skill_id}/versions",
            page=AsyncCursorPage[SkillVersion],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    version_list_params.VersionListParams,
                ),
            ),
            model=SkillVersion,
        )
    async def delete(
        self,
        version: str,
        *,
        skill_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> DeletedSkillVersion:
        """
        Delete a skill version.

        Args:
          version: The skill version number.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds

        Raises:
          ValueError: If `skill_id` or `version` is empty.
        """
        if not skill_id:
            raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
        if not version:
            raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
        return await self._delete(
            f"/skills/{skill_id}/versions/{version}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DeletedSkillVersion,
        )
class VersionsWithRawResponse:
def __init__(self, versions: Versions) -> None:
self._versions = versions
self.create = _legacy_response.to_raw_response_wrapper(
versions.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
versions.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
versions.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
versions.delete,
)
@cached_property
def content(self) -> ContentWithRawResponse:
return ContentWithRawResponse(self._versions.content)
class AsyncVersionsWithRawResponse:
def __init__(self, versions: AsyncVersions) -> None:
self._versions = versions
self.create = _legacy_response.async_to_raw_response_wrapper(
versions.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
versions.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
versions.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
versions.delete,
)
@cached_property
def content(self) -> AsyncContentWithRawResponse:
return AsyncContentWithRawResponse(self._versions.content)
class VersionsWithStreamingResponse:
def __init__(self, versions: Versions) -> None:
self._versions = versions
self.create = to_streamed_response_wrapper(
versions.create,
)
self.retrieve = to_streamed_response_wrapper(
versions.retrieve,
)
self.list = to_streamed_response_wrapper(
versions.list,
)
self.delete = to_streamed_response_wrapper(
versions.delete,
)
@cached_property
def content(self) -> ContentWithStreamingResponse:
return ContentWithStreamingResponse(self._versions.content)
class AsyncVersionsWithStreamingResponse:
def __init__(self, versions: AsyncVersions) -> None:
self._versions = versions
self.create = async_to_streamed_response_wrapper(
versions.create,
)
self.retrieve = async_to_streamed_response_wrapper(
versions.retrieve,
)
self.list = async_to_streamed_response_wrapper(
versions.list,
)
self.delete = async_to_streamed_response_wrapper(
versions.delete,
)
@cached_property
def content(self) -> AsyncContentWithStreamingResponse:
return AsyncContentWithStreamingResponse(self._versions.content)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/skills/versions/versions.py",
"license": "Apache License 2.0",
"lines": 457,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/deleted_skill.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["DeletedSkill"]
class DeletedSkill(BaseModel):
id: str
deleted: bool
object: Literal["skill.deleted"]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/deleted_skill.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_auto.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .inline_skill import InlineSkill
from .skill_reference import SkillReference
from .container_network_policy_disabled import ContainerNetworkPolicyDisabled
from .container_network_policy_allowlist import ContainerNetworkPolicyAllowlist
__all__ = ["ContainerAuto", "NetworkPolicy", "Skill"]
NetworkPolicy: TypeAlias = Annotated[
Union[ContainerNetworkPolicyDisabled, ContainerNetworkPolicyAllowlist], PropertyInfo(discriminator="type")
]
Skill: TypeAlias = Annotated[Union[SkillReference, InlineSkill], PropertyInfo(discriminator="type")]
class ContainerAuto(BaseModel):
type: Literal["container_auto"]
"""Automatically creates a container for this request"""
file_ids: Optional[List[str]] = None
"""An optional list of uploaded files to make available to your code."""
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
"""The memory limit for the container."""
network_policy: Optional[NetworkPolicy] = None
"""Network access policy for the container."""
skills: Optional[List[Skill]] = None
"""An optional list of skills referenced by id or inline data."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_auto.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_auto_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
from .inline_skill_param import InlineSkillParam
from .skill_reference_param import SkillReferenceParam
from .container_network_policy_disabled_param import ContainerNetworkPolicyDisabledParam
from .container_network_policy_allowlist_param import ContainerNetworkPolicyAllowlistParam
__all__ = ["ContainerAutoParam", "NetworkPolicy", "Skill"]
NetworkPolicy: TypeAlias = Union[ContainerNetworkPolicyDisabledParam, ContainerNetworkPolicyAllowlistParam]
Skill: TypeAlias = Union[SkillReferenceParam, InlineSkillParam]
class ContainerAutoParam(TypedDict, total=False):
type: Required[Literal["container_auto"]]
"""Automatically creates a container for this request"""
file_ids: SequenceNotStr[str]
"""An optional list of uploaded files to make available to your code."""
memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]]
"""The memory limit for the container."""
network_policy: NetworkPolicy
"""Network access policy for the container."""
skills: Iterable[Skill]
"""An optional list of skills referenced by id or inline data."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_auto_param.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_network_policy_allowlist.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .container_network_policy_domain_secret import ContainerNetworkPolicyDomainSecret
__all__ = ["ContainerNetworkPolicyAllowlist"]
class ContainerNetworkPolicyAllowlist(BaseModel):
allowed_domains: List[str]
"""A list of allowed domains when type is `allowlist`."""
type: Literal["allowlist"]
"""Allow outbound network access only to specified domains. Always `allowlist`."""
domain_secrets: Optional[List[ContainerNetworkPolicyDomainSecret]] = None
"""Optional domain-scoped secrets for allowlisted domains."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_network_policy_allowlist.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_network_policy_allowlist_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable
from typing_extensions import Literal, Required, TypedDict
from ..._types import SequenceNotStr
from .container_network_policy_domain_secret_param import ContainerNetworkPolicyDomainSecretParam
__all__ = ["ContainerNetworkPolicyAllowlistParam"]
class ContainerNetworkPolicyAllowlistParam(TypedDict, total=False):
allowed_domains: Required[SequenceNotStr[str]]
"""A list of allowed domains when type is `allowlist`."""
type: Required[Literal["allowlist"]]
"""Allow outbound network access only to specified domains. Always `allowlist`."""
domain_secrets: Iterable[ContainerNetworkPolicyDomainSecretParam]
"""Optional domain-scoped secrets for allowlisted domains."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_network_policy_allowlist_param.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_network_policy_disabled.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ContainerNetworkPolicyDisabled"]
class ContainerNetworkPolicyDisabled(BaseModel):
type: Literal["disabled"]
"""Disable outbound network access. Always `disabled`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_network_policy_disabled.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_network_policy_disabled_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ContainerNetworkPolicyDisabledParam"]
class ContainerNetworkPolicyDisabledParam(TypedDict, total=False):
type: Required[Literal["disabled"]]
"""Disable outbound network access. Always `disabled`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_network_policy_disabled_param.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_network_policy_domain_secret.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ..._models import BaseModel
__all__ = ["ContainerNetworkPolicyDomainSecret"]
class ContainerNetworkPolicyDomainSecret(BaseModel):
domain: str
"""The domain associated with the secret."""
name: str
"""The name of the secret to inject for the domain."""
value: str
"""The secret value to inject for the domain."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_network_policy_domain_secret.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_network_policy_domain_secret_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
__all__ = ["ContainerNetworkPolicyDomainSecretParam"]
class ContainerNetworkPolicyDomainSecretParam(TypedDict, total=False):
domain: Required[str]
"""The domain associated with the secret."""
name: Required[str]
"""The name of the secret to inject for the domain."""
value: Required[str]
"""The secret value to inject for the domain."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_network_policy_domain_secret_param.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_reference.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ContainerReference"]
class ContainerReference(BaseModel):
container_id: str
"""The ID of the referenced container."""
type: Literal["container_reference"]
"""References a container created with the /v1/containers endpoint"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_reference.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/container_reference_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ContainerReferenceParam"]
class ContainerReferenceParam(TypedDict, total=False):
container_id: Required[str]
"""The ID of the referenced container."""
type: Required[Literal["container_reference"]]
"""References a container created with the /v1/containers endpoint"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/container_reference_param.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/inline_skill.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from .inline_skill_source import InlineSkillSource
__all__ = ["InlineSkill"]
class InlineSkill(BaseModel):
description: str
"""The description of the skill."""
name: str
"""The name of the skill."""
source: InlineSkillSource
"""Inline skill payload"""
type: Literal["inline"]
"""Defines an inline skill for this request."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/inline_skill.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/inline_skill_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
from .inline_skill_source_param import InlineSkillSourceParam
__all__ = ["InlineSkillParam"]
class InlineSkillParam(TypedDict, total=False):
description: Required[str]
"""The description of the skill."""
name: Required[str]
"""The name of the skill."""
source: Required[InlineSkillSourceParam]
"""Inline skill payload"""
type: Required[Literal["inline"]]
"""Defines an inline skill for this request."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/inline_skill_param.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/inline_skill_source.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InlineSkillSource"]
class InlineSkillSource(BaseModel):
"""Inline skill payload"""
data: str
"""Base64-encoded skill zip bundle."""
media_type: Literal["application/zip"]
"""The media type of the inline skill payload. Must be `application/zip`."""
type: Literal["base64"]
"""The type of the inline skill source. Must be `base64`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/inline_skill_source.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/inline_skill_source_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["InlineSkillSourceParam"]
class InlineSkillSourceParam(TypedDict, total=False):
"""Inline skill payload"""
data: Required[str]
"""Base64-encoded skill zip bundle."""
media_type: Required[Literal["application/zip"]]
"""The media type of the inline skill payload. Must be `application/zip`."""
type: Required[Literal["base64"]]
"""The type of the inline skill source. Must be `base64`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/inline_skill_source_param.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/local_environment.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .local_skill import LocalSkill
__all__ = ["LocalEnvironment"]
class LocalEnvironment(BaseModel):
type: Literal["local"]
"""Use a local computer environment."""
skills: Optional[List[LocalSkill]] = None
"""An optional list of skills."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/local_environment.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/local_environment_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable
from typing_extensions import Literal, Required, TypedDict
from .local_skill_param import LocalSkillParam
__all__ = ["LocalEnvironmentParam"]
class LocalEnvironmentParam(TypedDict, total=False):
type: Required[Literal["local"]]
"""Use a local computer environment."""
skills: Iterable[LocalSkillParam]
"""An optional list of skills."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/local_environment_param.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/local_skill.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ..._models import BaseModel
__all__ = ["LocalSkill"]
class LocalSkill(BaseModel):
description: str
"""The description of the skill."""
name: str
"""The name of the skill."""
path: str
"""The path to the directory containing the skill."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/local_skill.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/local_skill_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
__all__ = ["LocalSkillParam"]
class LocalSkillParam(TypedDict, total=False):
description: Required[str]
"""The description of the skill."""
name: Required[str]
"""The name of the skill."""
path: Required[str]
"""The path to the directory containing the skill."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/local_skill_param.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_container_reference.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseContainerReference"]
class ResponseContainerReference(BaseModel):
"""Represents a container created with /v1/containers."""
container_id: str
type: Literal["container_reference"]
"""The environment type. Always `container_reference`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_container_reference.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.