# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Rubrics for the REPL environment.
Follows the OpenEnv Rubric system (RFC 004) to provide composable,
outcome-based rewards suitable for RL training (GRPO, etc.).
The key insight from DSPy GRPO and Daytona RL guides: the RLM is a pure
inference engine. Reward computation is external — it compares the final
answer against ground truth. The environment provides the reward via rubrics;
the training framework consumes it.
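
Example (illustrative sketch; ``action`` and ``obs`` come from one
environment step, where terminal observations set ``done=True`` and carry
the answer in ``metadata["final_answer"]``)::

    rubric = REPLRubric(outcome=ExactMatchRubric())
    rubric.set_expected("42")      # ground truth for this episode
    reward = rubric(action, obs)   # 1.0 on a terminal exact match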
"""

from __future__ import annotations

from typing import Any, Callable

from openenv.core.rubrics.base import Rubric


class ExactMatchRubric(Rubric):
"""Outcome rubric: 1.0 if final answer matches expected, 0.0 otherwise.
This is the standard outcome-based reward used by GRPO-style training.
The expected answer is set via `set_expected()` at reset time.
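
    Example (illustrative; ``obs`` stands in for a terminal observation
    with ``done=True`` and ``metadata["final_answer"]`` set)::

        rubric = ExactMatchRubric()
        rubric.set_expected("42")
        reward = rubric(action, obs)  # 1.0 on a normalized match, else 0.0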
"""

    def __init__(self, normalize: bool = True) -> None:
        super().__init__()
        self._expected: str | None = None
        self._normalize = normalize

    def set_expected(self, expected: str | None) -> None:
        self._expected = expected

    def forward(self, action: Any, observation: Any) -> float:
        # Reward only terminal steps that produced a final answer.
        if self._expected is None:
            return 0.0
        if not getattr(observation, "done", False):
            return 0.0
        final = getattr(observation, "metadata", {}).get("final_answer")
        if final is None:
            return 0.0
        if self._normalize:
            # Case- and whitespace-insensitive comparison.
            return (
                1.0
                if str(final).strip().lower() == str(self._expected).strip().lower()
                else 0.0
            )
        return 1.0 if str(final) == str(self._expected) else 0.0

    def reset(self) -> None:
        self._expected = None


class FuzzyMatchRubric(Rubric):
"""Outcome rubric: partial credit based on string containment.
Returns 1.0 for exact match, 0.5 if expected is contained in the answer
(or vice versa), 0.0 otherwise. Useful for tasks where partial matches
are acceptable.
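
    Example (illustrative; assuming a terminal observation whose
    ``final_answer`` is "the answer is paris")::

        rubric = FuzzyMatchRubric()
        rubric.set_expected("Paris")
        reward = rubric(action, obs)  # 0.5: expected contained in answer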
"""

    def __init__(self) -> None:
        super().__init__()
        self._expected: str | None = None

    def set_expected(self, expected: str | None) -> None:
        self._expected = expected

    def forward(self, action: Any, observation: Any) -> float:
if self._expected is None:
return 0.0
if not getattr(observation, "done", False):
return 0.0
final = getattr(observation, "metadata", {}).get("final_answer")
if final is None:
return 0.0
final_norm = str(final).strip().lower()
expected_norm = str(self._expected).strip().lower()
if final_norm == expected_norm:
return 1.0
if expected_norm in final_norm or final_norm in expected_norm:
return 0.5
return 0.0

    def reset(self) -> None:
        self._expected = None


class CustomMetricRubric(Rubric):
"""Outcome rubric using a user-provided metric function.
This mirrors the DSPy GRPO pattern where the user provides
`metric(expected, predicted) -> float`.
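
    Example (illustrative; a simple numeric-tolerance metric)::

        def numeric_match(expected: str, predicted: str) -> float:
            try:
                return 1.0 if abs(float(expected) - float(predicted)) < 1e-6 else 0.0
            except ValueError:
                return 0.0

        rubric = CustomMetricRubric(numeric_match)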
"""

    def __init__(self, metric_fn: Callable[[str, str], float]) -> None:
        super().__init__()
        self._metric_fn = metric_fn
        self._expected: str | None = None

    def set_expected(self, expected: str | None) -> None:
        self._expected = expected

    def forward(self, action: Any, observation: Any) -> float:
if self._expected is None:
return 0.0
if not getattr(observation, "done", False):
return 0.0
final = getattr(observation, "metadata", {}).get("final_answer")
if final is None:
return 0.0
return self._metric_fn(str(self._expected), str(final))

    def reset(self) -> None:
        self._expected = None


class CodeExecutionRubric(Rubric):
"""Process rubric: per-step signal based on code execution success.
Returns a small positive reward for successful execution,
a negative reward for errors, 0.0 for non-terminal steps.
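
    Example (illustrative; mild positive shaping for clean execution)::

        process = CodeExecutionRubric(success_reward=0.01, error_penalty=-0.05)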
"""

    def __init__(
        self,
        success_reward: float = 0.0,
        error_penalty: float = -0.05,
    ) -> None:
        super().__init__()
        self.success_reward = success_reward
        self.error_penalty = error_penalty

    def forward(self, action: Any, observation: Any) -> float:
        result = getattr(observation, "result", None)
        if result is None:
            # No code was executed on this step (e.g., a terminal answer).
            return 0.0
        if not getattr(result, "success", True):
            return self.error_penalty
        return self.success_reward


class REPLRubric(Rubric):
"""Composite rubric for the REPL environment.
Combines outcome-based reward (final answer correctness) with
optional process-based reward (code execution quality).
The outcome rubric is only evaluated on terminal steps (done=True).
The process rubric is evaluated on every step.
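
    Example (illustrative; fuzzy outcome credit plus execution shaping)::

        rubric = REPLRubric(
            outcome=FuzzyMatchRubric(),
            process=CodeExecutionRubric(error_penalty=-0.05),
        )
        rubric.set_expected("Paris")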
"""

    def __init__(
        self,
        outcome: Rubric | None = None,
        process: Rubric | None = None,
        failure_reward: float = -0.1,
    ) -> None:
        super().__init__()
        self.outcome = outcome or ExactMatchRubric()
        self.process = process or CodeExecutionRubric()
        self.failure_reward = failure_reward

    def set_expected(self, expected: str | None) -> None:
        """Pass the expected answer to the outcome rubric."""
        if hasattr(self.outcome, "set_expected"):
            self.outcome.set_expected(expected)

    def forward(self, action: Any, observation: Any) -> float:
done = getattr(observation, "done", False)
if done:
final = getattr(observation, "metadata", {}).get("final_answer")
if final is not None:
return self.outcome(action, observation)
# Done but no final answer (max iterations exhausted)
return self.failure_reward
# Non-terminal step: process reward only
return self.process(action, observation)

    def reset(self) -> None:
        self.outcome.reset()
        self.process.reset()