File size: 2,236 Bytes
07fffa0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
REPL Environment for OpenEnv.

A Python REPL environment for training language models on code execution tasks,
based on the Recursive Language Models (RLM) paradigm.

This environment allows language models to:
- Execute Python code in a sandboxed REPL
- Work with large contexts loaded as variables
- Finalize answers via FINAL(), FINAL_VAR(), or answer dict pattern
- Optionally make recursive LLM calls via llm_query() / llm_query_batched()

Example:
    >>> from repl_env import REPLEnv, REPLAction
    >>>
    >>> # Start from Docker
    >>> env = REPLEnv.from_docker_image("repl-env:latest")
    >>>
    >>> # Reset with context
    >>> result = env.reset(context="Hello World", task_prompt="Count characters")
    >>>
    >>> # Execute code
    >>> result = env.execute("count = len(context)")
    >>> result = env.execute("print(f'FINAL({count})')")
    >>>
    >>> # Check result
    >>> print(f"Done: {result.done}, Answer: {result.observation.metadata['final_answer']}")
    >>>
    >>> env.close()

References:
    - RLM Paper: https://arxiv.org/abs/2512.24601
    - Prime Intellect Blog: https://www.primeintellect.ai/blog/rlm
    - Alex Zhang Blog: https://alexzhang13.github.io/blog/2025/rlm/
"""

from .models import REPLAction, REPLObservation, REPLState, CodeBlockResult
from .client import REPLEnv
from .prompts import (
    # System prompts
    RLM_SYSTEM_PROMPT,
    RLM_SYSTEM_PROMPT_QWEN,
    # Prompt building
    QueryMetadata,
    build_rlm_system_prompt,
    build_user_prompt,
    build_initial_prompt,
    # Parsing utilities
    extract_code_blocks,
    format_observation,
)

# Public API of the repl_env package, grouped to mirror the import sections
# above. Concatenation keeps each group on its own line while producing the
# exact same flat list of export names.
__all__ = (
    # Data models
    ["REPLAction", "REPLObservation", "REPLState", "CodeBlockResult"]
    # Environment client
    + ["REPLEnv"]
    # System prompts
    + ["RLM_SYSTEM_PROMPT", "RLM_SYSTEM_PROMPT_QWEN"]
    # Prompt building helpers
    + ["QueryMetadata", "build_rlm_system_prompt", "build_user_prompt", "build_initial_prompt"]
    # Parsing utilities
    + ["extract_code_blocks", "format_observation"]
)