Increase max tokens and add gpt-5
Browse files
- src/pips/core.py +3 -3
- src/pips/model_registry.py +3 -1
- src/pips/models.py +4 -4
src/pips/core.py
CHANGED
|
@@ -30,7 +30,7 @@ class PIPSSolver:
|
|
| 30 |
*,
|
| 31 |
max_iterations: int = 8,
|
| 32 |
temperature: float = 0.0,
|
| 33 |
-
max_tokens: int =
|
| 34 |
top_p: float = 1.0,
|
| 35 |
interactive: bool = False,
|
| 36 |
critic_model: Optional[LLMModel] = None,
|
|
@@ -477,10 +477,10 @@ Make sure to follow these formatting requirements exactly.
|
|
| 477 |
img_b64 = img2base64(sample.image_input)
|
| 478 |
content = [
|
| 479 |
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_b64}"}},
|
| 480 |
-
{"type": "text", "text": sample.text_input},
|
| 481 |
]
|
| 482 |
else:
|
| 483 |
-
content = sample.text_input
|
| 484 |
|
| 485 |
conv = [
|
| 486 |
{"role": "system", "content": system_content},
|
|
|
|
| 30 |
*,
|
| 31 |
max_iterations: int = 8,
|
| 32 |
temperature: float = 0.0,
|
| 33 |
+
max_tokens: int = 50000,
|
| 34 |
top_p: float = 1.0,
|
| 35 |
interactive: bool = False,
|
| 36 |
critic_model: Optional[LLMModel] = None,
|
|
|
|
| 477 |
img_b64 = img2base64(sample.image_input)
|
| 478 |
content = [
|
| 479 |
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_b64}"}},
|
| 480 |
+
{"type": "text", "text": f"Question: {sample.text_input}"},
|
| 481 |
]
|
| 482 |
else:
|
| 483 |
+
content = f"Question: {sample.text_input}"
|
| 484 |
|
| 485 |
conv = [
|
| 486 |
{"role": "system", "content": system_content},
|
src/pips/model_registry.py
CHANGED
|
@@ -61,6 +61,8 @@ def _initialize_default_models():
|
|
| 61 |
"""Initialize the registry with default models."""
|
| 62 |
|
| 63 |
# OpenAI Models
|
|
|
|
|
|
|
| 64 |
register_model("gpt-4.1-2025-04-14", "openai", "OpenAI GPT-4.1")
|
| 65 |
register_model("gpt-4o-2024-08-06", "openai", "OpenAI GPT-4o")
|
| 66 |
register_model("gpt-4.1-mini-2025-04-14", "openai", "OpenAI GPT-4.1 Mini")
|
|
@@ -78,4 +80,4 @@ def _initialize_default_models():
|
|
| 78 |
register_model("claude-3-5-haiku-latest", "anthropic", "Anthropic Claude 3.5 Haiku")
|
| 79 |
|
| 80 |
# Initialize default models when module is imported
|
| 81 |
-
_initialize_default_models()
|
|
|
|
| 61 |
"""Initialize the registry with default models."""
|
| 62 |
|
| 63 |
# OpenAI Models
|
| 64 |
+
register_model("gpt-5", "openai", "OpenAI GPT-5")
|
| 65 |
+
register_model("gpt-5-mini", "openai", "OpenAI GPT-5-mini")
|
| 66 |
register_model("gpt-4.1-2025-04-14", "openai", "OpenAI GPT-4.1")
|
| 67 |
register_model("gpt-4o-2024-08-06", "openai", "OpenAI GPT-4o")
|
| 68 |
register_model("gpt-4.1-mini-2025-04-14", "openai", "OpenAI GPT-4.1 Mini")
|
|
|
|
| 80 |
register_model("claude-3-5-haiku-latest", "anthropic", "Anthropic Claude 3.5 Haiku")
|
| 81 |
|
| 82 |
# Initialize default models when module is imported
|
| 83 |
+
_initialize_default_models()
|
src/pips/models.py
CHANGED
|
@@ -124,8 +124,8 @@ class LLMModel:
|
|
| 124 |
class OpenAIModel(LLMModel):
|
| 125 |
"""
|
| 126 |
OpenAI GPT model interface.
|
| 127 |
-
|
| 128 |
-
Supports GPT-4, GPT-4o, o3, and o4 model families with proper handling
|
| 129 |
of different model requirements (reasoning effort for o3/o4 models).
|
| 130 |
"""
|
| 131 |
|
|
@@ -188,7 +188,7 @@ class OpenAIModel(LLMModel):
|
|
| 188 |
extra_args = {}
|
| 189 |
|
| 190 |
# Configure parameters based on model type
|
| 191 |
-
if "o3" in self.model_name or "o4" in self.model_name:
|
| 192 |
# Reasoning models have special parameters
|
| 193 |
extra_args["reasoning_effort"] = "medium"
|
| 194 |
extra_args["max_completion_tokens"] = 20000
|
|
@@ -233,7 +233,7 @@ class OpenAIModel(LLMModel):
|
|
| 233 |
extra_args = {}
|
| 234 |
|
| 235 |
# Configure parameters based on model type
|
| 236 |
-
if "o3" in self.model_name or "o4" in self.model_name:
|
| 237 |
extra_args["reasoning_effort"] = "medium"
|
| 238 |
extra_args["max_completion_tokens"] = 20000
|
| 239 |
else:
|
|
|
|
| 124 |
class OpenAIModel(LLMModel):
|
| 125 |
"""
|
| 126 |
OpenAI GPT model interface.
|
| 127 |
+
|
| 128 |
+
Supports GPT-4, GPT-4o, o3, o4, and gpt-5 model families with proper handling
|
| 129 |
of different model requirements (reasoning effort for o3/o4 models).
|
| 130 |
"""
|
| 131 |
|
|
|
|
| 188 |
extra_args = {}
|
| 189 |
|
| 190 |
# Configure parameters based on model type
|
| 191 |
+
if "o3" in self.model_name or "o4" in self.model_name or "gpt-5" in self.model_name:
|
| 192 |
# Reasoning models have special parameters
|
| 193 |
extra_args["reasoning_effort"] = "medium"
|
| 194 |
extra_args["max_completion_tokens"] = 20000
|
|
|
|
| 233 |
extra_args = {}
|
| 234 |
|
| 235 |
# Configure parameters based on model type
|
| 236 |
+
if "o3" in self.model_name or "o4" in self.model_name or "gpt-5" in self.model_name:
|
| 237 |
extra_args["reasoning_effort"] = "medium"
|
| 238 |
extra_args["max_completion_tokens"] = 20000
|
| 239 |
else:
|