code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def try_simplify_array_to_value(np_array):
"""If given numpy array has all the same values, returns that value."""
first_value = np_array.item(0)
if np.all(np_array == first_value):
return np.array(first_value, dtype=np_array.dtype)
else:
return np_array | If given numpy array has all the same values, returns that value. | try_simplify_array_to_value | python | tensorflow/agents | tf_agents/environments/gym_wrapper.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/gym_wrapper.py | Apache-2.0 |
def nested_spec(spec, child_name):
"""Returns the nested spec with a unique name."""
nested_name = name + '/' + child_name if name else child_name
return spec_from_gym_space(
spec, dtype_map, simplify_box_bounds, nested_name
) | Returns the nested spec with a unique name. | nested_spec | python | tensorflow/agents | tf_agents/environments/gym_wrapper.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/gym_wrapper.py | Apache-2.0 |
def __getattr__(self, name: Text) -> Any:
"""Forward all other calls to the base environment."""
gym_env = super(GymWrapper, self).__getattribute__('_gym_env')
return getattr(gym_env, name) | Forward all other calls to the base environment. | __getattr__ | python | tensorflow/agents | tf_agents/environments/gym_wrapper.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/gym_wrapper.py | Apache-2.0 |
def _to_obs_space_dtype(self, observation):
"""Make sure observation matches the specified space.
Observation spaces in gym didn't have a dtype for a long time. Now that they
do there is a large number of environments that do not follow the dtype in
the space definition. Since we use the space definiti... | Make sure observation matches the specified space.
Observation spaces in gym didn't have a dtype for a long time. Now that they
do there is a large number of environments that do not follow the dtype in
the space definition. Since we use the space definition to create the
tensorflow graph we need to ma... | _to_obs_space_dtype | python | tensorflow/agents | tf_agents/environments/gym_wrapper.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/gym_wrapper.py | Apache-2.0 |
def __init__(
self,
env_constructors: Sequence[EnvConstructor],
start_serially: bool = True,
blocking: bool = False,
flatten: bool = False,
):
"""Batch together environments and simulate them in external processes.
The environments can be different but must use the same action a... | Batch together environments and simulate them in external processes.
The environments can be different but must use the same action and
observation specs.
Args:
env_constructors: List of callables that create environments.
start_serially: Whether to start environments serially or in parallel.
... | __init__ | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def _reset(self):
"""Reset all environments and combine the resulting observation.
Returns:
Time step with batch dimension.
"""
time_steps = [env.reset(self._blocking) for env in self._envs]
if not self._blocking:
time_steps = [promise() for promise in time_steps]
return self._stack... | Reset all environments and combine the resulting observation.
Returns:
Time step with batch dimension.
| _reset | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def _step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action, possibly nested, to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
time_steps = [
... | Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action, possibly nested, to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
| _step | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def _stack_time_steps(self, time_steps):
"""Given a list of TimeStep, combine to one with a batch dimension."""
if self._flatten:
return nest_utils.fast_map_structure_flatten(
lambda *arrays: np.stack(arrays), self._time_step_spec, *time_steps
)
else:
return nest_utils.fast_map_s... | Given a list of TimeStep, combine to one with a batch dimension. | _stack_time_steps | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def _unstack_actions(self, batched_actions):
"""Returns a list of actions from potentially nested batch of actions."""
flattened_actions = tf.nest.flatten(batched_actions)
if self._flatten:
unstacked_actions = zip(*flattened_actions)
else:
unstacked_actions = [
tf.nest.pack_sequenc... | Returns a list of actions from potentially nested batch of actions. | _unstack_actions | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def render(self, mode: Text = 'rgb_array') -> types.NestedArray:
"""Renders the environment.
Args:
mode: Rendering mode. Currently only 'rgb_array' is supported because this
is a batched environment.
Returns:
An ndarray of shape [batch_size, width, height, 3] denoting RGB images
... | Renders the environment.
Args:
mode: Rendering mode. Currently only 'rgb_array' is supported because this
is a batched environment.
Returns:
An ndarray of shape [batch_size, width, height, 3] denoting RGB images
(for mode=`rgb_array`).
Raises:
NotImplementedError: If the en... | render | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def __init__(self, env_constructor: EnvConstructor, flatten: bool = False):
"""Step environment in a separate process for lock free paralellism.
The environment is created in an external process by calling the provided
callable. This can be an environment class, or a function creating the
environment a... | Step environment in a separate process for lock free paralellism.
The environment is created in an external process by calling the provided
callable. This can be an environment class, or a function creating the
environment and potentially wrapping it. The returned environment should
not access global v... | __init__ | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def start(self, wait_to_start: bool = True) -> None:
"""Start the process.
Args:
wait_to_start: Whether the call should wait for an env initialization.
"""
mp_context = multiprocessing.get_context()
self._conn, conn = mp_context.Pipe()
self._process = mp_context.Process(target=self._worke... | Start the process.
Args:
wait_to_start: Whether the call should wait for an env initialization.
| start | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def wait_start(self) -> None:
"""Wait for the started process to finish initialization."""
result = self._conn.recv()
if isinstance(result, Exception):
self._conn.close()
self._process.join(5)
raise result
assert result == self._READY, result | Wait for the started process to finish initialization. | wait_start | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def __getattr__(self, name: Text) -> Any:
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
This method is only called if the attribute is not found in the dictionary
of `ParallelPyEnvironment`'s definition.
Args:... | Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
This method is only called if the attribute is not found in the dictionary
of `ParallelPyEnvironment`'s definition.
Args:
name: Attribute to access.
Returns:
... | __getattr__ | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def call(self, name: Text, *args, **kwargs) -> Promise:
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
The attr... | Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
The attribute.
| call | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def close(self) -> None:
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
if self._process.is_alive():
self._process.join(5) | Send a close message to the external process and join it. | close | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def step(
self, action: types.NestedArray, blocking: bool = True
) -> Union[ts.TimeStep, Promise]:
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
time step when blocking, otherwise callable that re... | Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
time step when blocking, otherwise callable that returns the time step.
| step | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def reset(self, blocking: bool = True) -> Union[ts.TimeStep, Promise]:
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if bl... | Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
| reset | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def render(
self, mode: Text = 'rgb_array', blocking: bool = True
) -> Union[types.NestedArray, Promise]:
"""Renders the environment.
Args:
mode: Rendering mode. Only 'rgb_array' is supported.
blocking: Whether to wait for the result.
Returns:
An ndarray of shape [width, height, ... | Renders the environment.
Args:
mode: Rendering mode. Only 'rgb_array' is supported.
blocking: Whether to wait for the result.
Returns:
An ndarray of shape [width, height, 3] denoting an RGB image when
blocking. Otherwise, callable that returns the rendered image.
Raises:
NotI... | render | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The reveived message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = sel... | Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The reveived message is of an unknown type.
Returns:
Payload object of the message.
| _receive | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def _worker(self, conn):
"""The process waits for actions and sends back environment results.
Args:
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = cloudpickle.loads(self._pickled_env_constructor)(... | The process waits for actions and sends back environment results.
Args:
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
| _worker | python | tensorflow/agents | tf_agents/environments/parallel_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/parallel_py_environment.py | Apache-2.0 |
def __init__(self, handle_auto_reset: bool = False):
"""Base class for Python RL environments.
Args:
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
"""
self._handle_auto_reset = handle_auto_reset
self._current_time_step = None
common.asser... | Base class for Python RL environments.
Args:
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
| __init__ | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def batch_size(self) -> Optional[int]:
"""The batch size of the environment.
Returns:
The batch size of the environment, or `None` if the environment is not
batched.
Raises:
RuntimeError: If a subclass overrode batched to return True but did not
override the batch_size property.
... | The batch size of the environment.
Returns:
The batch size of the environment, or `None` if the environment is not
batched.
Raises:
RuntimeError: If a subclass overrode batched to return True but did not
override the batch_size property.
| batch_size | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def should_reset(self, current_time_step: ts.TimeStep) -> bool:
"""Whether the Environmet should reset given the current timestep.
By default it only resets when all time_steps are `LAST`.
Args:
current_time_step: The current `TimeStep`.
Returns:
A bool indicating whether the Environment ... | Whether the Environmet should reset given the current timestep.
By default it only resets when all time_steps are `LAST`.
Args:
current_time_step: The current `TimeStep`.
Returns:
A bool indicating whether the Environment should reset or not.
| should_reset | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def observation_spec(self) -> types.NestedArraySpec:
"""Defines the observations provided by the environment.
May use a subclass of `ArraySpec` that specifies additional properties such
as min and max bounds on the values.
Returns:
An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.... | Defines the observations provided by the environment.
May use a subclass of `ArraySpec` that specifies additional properties such
as min and max bounds on the values.
Returns:
An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.
| observation_spec | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def action_spec(self) -> types.NestedArraySpec:
"""Defines the actions that should be provided to `step()`.
May use a subclass of `ArraySpec` that specifies additional properties such
as min and max bounds on the values.
Returns:
An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.
... | Defines the actions that should be provided to `step()`.
May use a subclass of `ArraySpec` that specifies additional properties such
as min and max bounds on the values.
Returns:
An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.
| action_spec | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def discount_spec(self) -> types.NestedArraySpec:
"""Defines the discount that are returned by `step()`.
Override this method to define an environment that uses non-standard
discount values, for example an environment with array-valued discounts.
Returns:
An `ArraySpec`, or a nested dict, list o... | Defines the discount that are returned by `step()`.
Override this method to define an environment that uses non-standard
discount values, for example an environment with array-valued discounts.
Returns:
An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.
| discount_spec | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def step(self, action: types.NestedArray) -> ts.TimeStep:
"""Updates the environment according to the action and returns a `TimeStep`.
If the environment returned a `TimeStep` with `StepType.LAST` at the
previous step the implementation of `_step` in the environment should call
`reset` to start a new s... | Updates the environment according to the action and returns a `TimeStep`.
If the environment returned a `TimeStep` with `StepType.LAST` at the
previous step the implementation of `_step` in the environment should call
`reset` to start a new sequence and ignore `action`.
This method will start a new se... | step | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def render(self, mode: Text = 'rgb_array') -> Optional[types.NestedArray]:
"""Renders the environment.
Args:
mode: One of ['rgb_array', 'human']. Renders to an numpy array, or brings
up a window where the environment can be visualized.
Returns:
An ndarray of shape [width, height, 3] de... | Renders the environment.
Args:
mode: One of ['rgb_array', 'human']. Renders to an numpy array, or brings
up a window where the environment can be visualized.
Returns:
An ndarray of shape [width, height, 3] denoting an RGB image if mode is
`rgb_array`. Otherwise return nothing and ren... | render | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def seed(self, seed: types.Seed) -> Any:
"""Seeds the environment.
Args:
seed: Value to use as seed for the environment.
"""
del seed # unused
raise NotImplementedError('No seed support for this environment.') | Seeds the environment.
Args:
seed: Value to use as seed for the environment.
| seed | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def get_state(self) -> Any:
"""Returns the `state` of the environment.
The `state` contains everything required to restore the environment to the
current configuration. This can contain e.g.
- The current time_step.
- The number of steps taken in the environment (for finite horizon MDPs).
... | Returns the `state` of the environment.
The `state` contains everything required to restore the environment to the
current configuration. This can contain e.g.
- The current time_step.
- The number of steps taken in the environment (for finite horizon MDPs).
- Hidden state (for POMDPs).
... | get_state | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def set_state(self, state: Any) -> None:
"""Restores the environment to a given `state`.
See definition of `state` in the documentation for get_state().
Args:
state: A state to restore the environment to.
"""
raise NotImplementedError(
'This environment has not implemented `set_state... | Restores the environment to a given `state`.
See definition of `state` in the documentation for get_state().
Args:
state: A state to restore the environment to.
| set_state | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def _step(self, action: types.NestedArray) -> ts.TimeStep:
"""Updates the environment according to action and returns a `TimeStep`.
See `step(self, action)` docstring for more details.
Args:
action: A NumPy array, or a nested dict, list or tuple of arrays
corresponding to `action_spec()`.
... | Updates the environment according to action and returns a `TimeStep`.
See `step(self, action)` docstring for more details.
Args:
action: A NumPy array, or a nested dict, list or tuple of arrays
corresponding to `action_spec()`.
| _step | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def _reset(self) -> ts.TimeStep:
"""Starts a new sequence, returns the first `TimeStep` of this sequence.
See `reset(self)` docstring for more details
""" | Starts a new sequence, returns the first `TimeStep` of this sequence.
See `reset(self)` docstring for more details
| _reset | python | tensorflow/agents | tf_agents/environments/py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_environment.py | Apache-2.0 |
def _convert_action_spec(spec: tfa_spec.ArraySpec) -> dm_spec.Array:
"""Converts a TF Agents action spec to a DM action spec.
Similar to _convert_spec but changes discrete actions to DiscreteArray rather
than BoundedArray.
Args:
spec: The TF Agents action spec to convert.
Returns:
The converted DM a... | Converts a TF Agents action spec to a DM action spec.
Similar to _convert_spec but changes discrete actions to DiscreteArray rather
than BoundedArray.
Args:
spec: The TF Agents action spec to convert.
Returns:
The converted DM action spec.
| _convert_action_spec | python | tensorflow/agents | tf_agents/environments/py_to_dm_wrapper.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/py_to_dm_wrapper.py | Apache-2.0 |
def __init__(
self,
observation_spec: types.NestedArray,
action_spec: Optional[types.NestedArray] = None,
episode_end_probability: types.Float = 0.1,
discount: types.Float = 1.0,
reward_fn: Optional[RewardFn] = None,
batch_size: Optional[types.Int] = None,
auto_reset: boo... | Initializes the environment.
Args:
observation_spec: An `ArraySpec`, or a nested dict, list or tuple of
`ArraySpec`s.
action_spec: An `ArraySpec`, or a nested dict, list or tuple of
`ArraySpec`s.
episode_end_probability: Probability an episode will end when the
environment... | __init__ | python | tensorflow/agents | tf_agents/environments/random_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/random_py_environment.py | Apache-2.0 |
def __init__(
self,
time_step_spec,
action_spec,
batch_size=1,
episode_end_probability=0.1,
):
"""Initializes the environment.
Args:
time_step_spec: A `TimeStep` namedtuple containing `TensorSpec`s defining
the Tensors returned by `step()` (step_type, reward, disco... | Initializes the environment.
Args:
time_step_spec: A `TimeStep` namedtuple containing `TensorSpec`s defining
the Tensors returned by `step()` (step_type, reward, discount, and
observation).
action_spec: A nest of BoundedTensorSpec representing the actions of the
environment.
... | __init__ | python | tensorflow/agents | tf_agents/environments/random_tf_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/random_tf_environment.py | Apache-2.0 |
def _reset(self):
"""Resets the environment and returns the current time_step."""
obs, _ = self._sample_obs_and_reward()
time_step = ts.restart(
obs, self._batch_size, reward_spec=self._time_step_spec.reward
)
self._update_time_step(time_step)
return self._current_time_step() | Resets the environment and returns the current time_step. | _reset | python | tensorflow/agents | tf_agents/environments/random_tf_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/random_tf_environment.py | Apache-2.0 |
def _step(self, action):
"""Steps the environment according to the action."""
# Make sure the given action is compatible with the spec. We compare it to
# t[0] as the spec doesn't have a batch dim.
tf.nest.map_structure(
lambda spec, t: tf.Assert(spec.is_compatible_with(t[0]), [t]),
self... | Steps the environment according to the action. | _step | python | tensorflow/agents | tf_agents/environments/random_tf_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/random_tf_environment.py | Apache-2.0 |
def game(
name: Text = 'Pong',
obs_type: Text = 'image',
mode: Text = 'NoFrameskip',
version: Text = 'v0',
) -> Text:
"""Generates the full name for the game.
Args:
name: String. Ex. Pong, SpaceInvaders, ...
obs_type: String, type of observation. Ex. 'image' or 'ram'.
mode: String. Ex. ... | Generates the full name for the game.
Args:
name: String. Ex. Pong, SpaceInvaders, ...
obs_type: String, type of observation. Ex. 'image' or 'ram'.
mode: String. Ex. '', 'NoFrameskip' or 'Deterministic'.
version: String. Ex. 'v0' or 'v4'.
Returns:
The full name for the game.
| game | python | tensorflow/agents | tf_agents/environments/suite_atari.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/suite_atari.py | Apache-2.0 |
def load(
environment_name: Text,
discount: types.Int = 1.0,
max_episode_steps: Optional[types.Int] = None,
gym_env_wrappers: Sequence[
types.GymEnvWrapper
] = DEFAULT_ATARI_GYM_WRAPPERS,
env_wrappers: Sequence[types.PyEnvWrapper] = (),
spec_dtype_map: Optional[Dict[gym.Space, np.dty... | Loads the selected environment and wraps it with the specified wrappers. | load | python | tensorflow/agents | tf_agents/environments/suite_atari.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/suite_atari.py | Apache-2.0 |
def load(
bsuite_id: Text,
record: bool = True,
save_path: Optional[Text] = None,
logging_mode: Text = 'csv',
overwrite: bool = False,
) -> py_environment.PyEnvironment:
"""Loads the selected environment.
Args:
bsuite_id: a bsuite_id specifies a bsuite experiment. For an example
`bsui... | Loads the selected environment.
Args:
bsuite_id: a bsuite_id specifies a bsuite experiment. For an example
`bsuite_id` "deep_sea/7" will be 7th level of the "deep_sea" task.
record: whether to log bsuite results.
save_path: the directory to save bsuite results.
logging_mode: which form of loggi... | load | python | tensorflow/agents | tf_agents/environments/suite_bsuite.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/suite_bsuite.py | Apache-2.0 |
def _load_env(
domain_name: Text,
task_name: Text,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False,
):
"""Loads a DM environment.
Args:
domain_name: A string containing the name of a domain.
task_name: A string containing the name of a task.
task_kwargs: Op... | Loads a DM environment.
Args:
domain_name: A string containing the name of a domain.
task_name: A string containing the name of a task.
task_kwargs: Optional `dict` of keyword arguments for the task.
environment_kwargs: Optional `dict` specifying keyword arguments for the
environment.
visua... | _load_env | python | tensorflow/agents | tf_agents/environments/suite_dm_control.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/suite_dm_control.py | Apache-2.0 |
def load(
domain_name: Text,
task_name: Text,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False,
render_kwargs=None,
env_wrappers: Sequence[types.PyEnvWrapper] = (),
) -> py_environment.PyEnvironment:
"""Returns an environment from a domain name, task name and optio... | Returns an environment from a domain name, task name and optional settings.
Args:
domain_name: A string containing the name of a domain.
task_name: A string containing the name of a task.
task_kwargs: Optional `dict` of keyword arguments for the task.
environment_kwargs: Optional `dict` specifying ke... | load | python | tensorflow/agents | tf_agents/environments/suite_dm_control.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/suite_dm_control.py | Apache-2.0 |
def load_pixels(
domain_name: Text,
task_name: Text,
observation_key: Text = 'pixels',
pixels_only: bool = True,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False,
render_kwargs=None,
env_wrappers: Sequence[types.PyEnvWrapper] = (),
env_state_wrappers: Seq... | Returns an environment from a domain name, task name and optional settings.
Args:
domain_name: A string containing the name of a domain.
task_name: A string containing the name of a task.
observation_key: Optional custom string specifying the pixel observation's
key in the `OrderedDict` of observat... | load_pixels | python | tensorflow/agents | tf_agents/environments/suite_dm_control.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/suite_dm_control.py | Apache-2.0 |
def __init__(self, time_step_spec=None, action_spec=None, batch_size=1):
"""Initializes the environment.
Meant to be called by subclass constructors.
Args:
time_step_spec: A `TimeStep` namedtuple containing `TensorSpec`s defining
the Tensors returned by `step()` (step_type, reward, discount,... | Initializes the environment.
Meant to be called by subclass constructors.
Args:
time_step_spec: A `TimeStep` namedtuple containing `TensorSpec`s defining
the Tensors returned by `step()` (step_type, reward, discount, and
observation).
action_spec: A nest of BoundedTensorSpec repres... | __init__ | python | tensorflow/agents | tf_agents/environments/tf_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_environment.py | Apache-2.0 |
def _pack_named_sequence(flat_inputs, input_spec, batch_shape):
"""Assembles back a nested structure that has been flattened."""
named_inputs = []
for flat_input, spec in zip(flat_inputs, tf.nest.flatten(input_spec)):
named_input = tf.identity(flat_input, name=spec.name)
if not tf.executing_eagerly():
... | Assembles back a nested structure that has been flattened. | _pack_named_sequence | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def _check_not_called_concurrently(lock):
"""Checks the returned context is not executed concurrently with any other."""
if not lock.acquire(False): # Non-blocking.
raise RuntimeError(
'Detected concurrent execution of TFPyEnvironment ops. Make sure the '
'appropriate step_state is passed to st... | Checks the returned context is not executed concurrently with any other. | _check_not_called_concurrently | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def __init__(
self,
environment: py_environment.PyEnvironment,
check_dims: bool = False,
isolation: bool = False,
):
"""Initializes a new `TFPyEnvironment`.
Args:
environment: Environment to interact with, implementing
`py_environment.PyEnvironment`. Or a `callable` tha... | Initializes a new `TFPyEnvironment`.
Args:
environment: Environment to interact with, implementing
`py_environment.PyEnvironment`. Or a `callable` that returns an
environment of this form. If a `callable` is provided and
`thread_isolation` is provided, the callable is executed in th... | __init__ | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def __getattr__(self, name: Text) -> Any:
"""Enables access attributes of the wrapped PyEnvironment.
Use with caution since methods of the PyEnvironment can be incompatible
with TF.
Args:
name: Name of the attribute.
Returns:
The attribute.
"""
if name in self.__dict__:
... | Enables access attributes of the wrapped PyEnvironment.
Use with caution since methods of the PyEnvironment can be incompatible
with TF.
Args:
name: Name of the attribute.
Returns:
The attribute.
| __getattr__ | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def close(self) -> None:
"""Send close to wrapped env & also to the isolation pool + join it.
Only closes pool when `isolation` was provided at init time.
"""
self._env.close()
if self._pool:
self._pool.join()
self._pool.close()
self._pool = None | Send close to wrapped env & also to the isolation pool + join it.
Only closes pool when `isolation` was provided at init time.
| close | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def _current_time_step(self):
"""Returns the current ts.TimeStep.
Returns:
A `TimeStep` tuple of:
step_type: A scalar int32 tensor representing the `StepType` value.
reward: A float32 tensor representing the reward at this
timestep.
discount: A scalar float32 tensor repr... | Returns the current ts.TimeStep.
Returns:
A `TimeStep` tuple of:
step_type: A scalar int32 tensor representing the `StepType` value.
reward: A float32 tensor representing the reward at this
timestep.
discount: A scalar float32 tensor representing the discount [0, 1].
... | _current_time_step | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def _reset(self):
"""Returns the current `TimeStep` after resetting the environment.
Returns:
A `TimeStep` tuple of:
step_type: A scalar int32 tensor representing the `StepType` value.
reward: A float32 tensor representing the reward at this
timestep.
discount: A scalar ... | Returns the current `TimeStep` after resetting the environment.
Returns:
A `TimeStep` tuple of:
step_type: A scalar int32 tensor representing the `StepType` value.
reward: A float32 tensor representing the reward at this
timestep.
discount: A scalar float32 tensor representi... | _reset | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def _step(self, actions):
"""Returns a TensorFlow op to step the environment.
Args:
actions: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `action_spec()`.
Returns:
A `TimeStep` tuple of:
step_type: A scalar int32 tensor representing the `StepType` value... | Returns a TensorFlow op to step the environment.
Args:
actions: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `action_spec()`.
Returns:
A `TimeStep` tuple of:
step_type: A scalar int32 tensor representing the `StepType` value.
reward: A float32 tenso... | _step | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def render(self, mode: Text = 'rgb_array') -> Optional[types.NestedTensor]:
"""Renders the environment.
Note for compatibility this will convert the image to uint8.
Args:
mode: One of ['rgb_array', 'human']. Renders to an numpy array, or brings
up a window where the environment can be visual... | Renders the environment.
Note for compatibility this will convert the image to uint8.
Args:
mode: One of ['rgb_array', 'human']. Renders to an numpy array, or brings
up a window where the environment can be visualized.
Returns:
A Tensor of shape [width, height, 3] denoting an RGB imag... | render | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def _render(mode):
"""Pywrapper fn to the environments render."""
# Mode might be passed down as bytes or ndarray.
# If so, convert to a str first.
if isinstance(mode, np.ndarray):
mode = str(mode)
if isinstance(mode, bytes):
mode = mode.decode('utf-8')
if mode == 'rg... | Pywrapper fn to the environments render. | _render | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def _time_step_from_numpy_function_outputs(self, outputs):
"""Forms a `TimeStep` from the output of the numpy_function outputs."""
batch_shape = () if not self.batched else (self.batch_size,)
batch_shape = tf.TensorShape(batch_shape)
time_step = _pack_named_sequence(
outputs, self.time_step_spec... | Forms a `TimeStep` from the output of the numpy_function outputs. | _time_step_from_numpy_function_outputs | python | tensorflow/agents | tf_agents/environments/tf_py_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_py_environment.py | Apache-2.0 |
def __init__(self, policy, time_major=False):
"""Creates a TrajectoryReplay object.
TrajectoryReplay.run returns the actions and policy info of the new policy
assuming it saw the observations from the given trajectory.
Args:
policy: A tf_policy.TFPolicy policy.
time_major: If `True`, the t... | Creates a TrajectoryReplay object.
TrajectoryReplay.run returns the actions and policy info of the new policy
assuming it saw the observations from the given trajectory.
Args:
policy: A tf_policy.TFPolicy policy.
time_major: If `True`, the tensors in `trajectory` passed to method `run`
... | __init__ | python | tensorflow/agents | tf_agents/environments/trajectory_replay.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/trajectory_replay.py | Apache-2.0 |
def process_step(
time, time_step, policy_state, output_action_tas, output_policy_info_tas
):
"""Take an action on the given step, and update output TensorArrays.
Args:
time: Step time. Describes which row to read from the trajectory
TensorArrays and which location to write i... | Take an action on the given step, and update output TensorArrays.
Args:
time: Step time. Describes which row to read from the trajectory
TensorArrays and which location to write into in the output
TensorArrays.
time_step: Previous step's `TimeStep`.
policy_state: Poli... | process_step | python | tensorflow/agents | tf_agents/environments/trajectory_replay.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/trajectory_replay.py | Apache-2.0 |
def loop_body(
time, time_step, policy_state, output_action_tas, output_policy_info_tas
):
"""Runs a step in environment.
While loop will call multiple times.
Args:
time: Step time.
time_step: Previous step's `TimeStep`.
policy_state: Policy state tensor or nested... | Runs a step in environment.
While loop will call multiple times.
Args:
time: Step time.
time_step: Previous step's `TimeStep`.
policy_state: Policy state tensor or nested structure of tensors.
output_action_tas: Updated nest of `tf.TensorArray`, the new actions.
out... | loop_body | python | tensorflow/agents | tf_agents/environments/trajectory_replay.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/trajectory_replay.py | Apache-2.0 |
def get_tf_env(
environment: Union[
py_environment.PyEnvironment, tf_environment.TFEnvironment
]
) -> tf_environment.TFEnvironment:
"""Ensures output is a tf_environment, wrapping py_environments if needed."""
if environment is None:
raise ValueError('`environment` cannot be None')
if isinstan... | Ensures output is a tf_environment, wrapping py_environments if needed. | get_tf_env | python | tensorflow/agents | tf_agents/environments/utils.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/utils.py | Apache-2.0 |
def validate_py_environment(
environment: py_environment.PyEnvironment,
episodes: int = 5,
observation_and_action_constraint_splitter: Optional[types.Splitter] = None,
action_constraint_suffix: Optional[str] = None,
):
"""Validates the environment follows the defined specs.
Args:
environment: T... | Validates the environment follows the defined specs.
Args:
environment: The environment to test.
episodes: The number of episodes to run a random policy over.
observation_and_action_constraint_splitter: A function used to process
observations with action constraints. These constraints can indicate,... | validate_py_environment | python | tensorflow/agents | tf_agents/environments/utils.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/utils.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
process_profile_fn: Callable[[cProfile.Profile], Any],
process_steps: int,
):
"""Create a PerformanceProfiler that uses cProfile to profile env execution.
Args:
env: Environment to wrap.
process_profile_fn: A callback ... | Create a PerformanceProfiler that uses cProfile to profile env execution.
Args:
env: Environment to wrap.
process_profile_fn: A callback that accepts a `Profile` object. After
`process_profile_fn` is called, profile information is reset.
process_steps: The frequency with which `process_pr... | __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
times: types.Int,
handle_auto_reset: bool = False,
):
"""Creates an action repeat wrapper.
Args:
env: Environment to wrap.
times: Number of times the action should be repeated.
handle_auto_reset: When `True` the ... | Creates an action repeat wrapper.
Args:
env: Environment to wrap.
times: Number of times the action should be repeated.
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
Raises:
ValueError: If the times parameter is not greater than 1.
... | __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
flat_dtype=None,
handle_auto_reset: bool = False,
):
"""Creates a FlattenActionWrapper.
Args:
env: Environment to wrap.
flat_dtype: Optional, if set to a np.dtype the flat action_spec uses this
dtype.
han... | Creates a FlattenActionWrapper.
Args:
env: Environment to wrap.
flat_dtype: Optional, if set to a np.dtype the flat action_spec uses this
dtype.
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
Raises:
ValueError: If any of the ac... | __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
idx: Union[Sequence[int], np.ndarray],
handle_auto_reset: bool = False,
):
"""Creates an observation filter wrapper.
Args:
env: Environment to wrap.
idx: Array of indexes pointing to elements to include in output.
... | Creates an observation filter wrapper.
Args:
env: Environment to wrap.
idx: Array of indexes pointing to elements to include in output.
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
Raises:
ValueError: If observation spec is nested.
... | __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _map_actions(self, action, action_map):
"""Maps the given discrete action to the corresponding continuous action.
Args:
action: Discrete action to map.
action_map: Array with the continuous linspaces for the action.
Returns:
Numpy array with the mapped continuous actions.
Raises:... | Maps the given discrete action to the corresponding continuous action.
Args:
action: Discrete action to map.
action_map: Array with the continuous linspaces for the action.
Returns:
Numpy array with the mapped continuous actions.
Raises:
ValueError: If the given action's shpe does ... | _map_actions | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _step(self, action):
"""Steps the environment while remapping the actions.
Args:
action: Action to take.
Returns:
The next time_step from the environment.
"""
continuous_actions = self._map_actions(action, self._action_map)
env_action_spec = self._env.action_spec()
if tf.n... | Steps the environment while remapping the actions.
Args:
action: Action to take.
Returns:
The next time_step from the environment.
| _step | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _step(self, action):
"""Steps the environment after clipping the actions.
Args:
action: Action to take.
Returns:
The next time_step from the environment.
"""
env_action_spec = self._env.action_spec()
def _clip_to_spec(act_spec, act):
# NumPy does not allow both min and m... | Steps the environment after clipping the actions.
Args:
action: Action to take.
Returns:
The next time_step from the environment.
| _step | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
observations_allowlist: Optional[Sequence[Text]] = None,
handle_auto_reset: bool = False,
):
"""Initializes a wrapper to flatten environment observations.
Args:
env: A `py_environment.PyEnvironment` environment to wrap.
... | Initializes a wrapper to flatten environment observations.
Args:
env: A `py_environment.PyEnvironment` environment to wrap.
observations_allowlist: A list of observation keys that want to be
observed from the environment. All other observations returned are
filtered out. If not provid... | __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _filter_observations(self, observations):
"""Filters out unwanted observations from the environment.
Args:
observations: A nested dictionary of arrays corresponding to
`observation_spec()`. This is the observation attribute in the TimeStep
object returned by the environment.
Retu... | Filters out unwanted observations from the environment.
Args:
observations: A nested dictionary of arrays corresponding to
`observation_spec()`. This is the observation attribute in the TimeStep
object returned by the environment.
Returns:
A nested dict of arrays corresponding to `... | _filter_observations | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _pack_and_filter_timestep_observation(self, timestep):
"""Pack and filter observations into a single dimension.
Args:
timestep: A `TimeStep` namedtuple containing: - step_type: A `StepType`
value. - reward: Reward at this timestep. - discount: A discount in the
range [0, 1]. - observa... | Pack and filter observations into a single dimension.
Args:
timestep: A `TimeStep` namedtuple containing: - step_type: A `StepType`
value. - reward: Reward at this timestep. - discount: A discount in the
range [0, 1]. - observation: A NumPy array, or a nested dict, list or
tuple of ar... | _pack_and_filter_timestep_observation | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _flatten_nested_observations(self, observations, is_batched):
"""Flatten individual observations and then flatten the nested structure.
Args:
observations: A flattened NumPy array of shape corresponding to
`observation_spec()` or an `observation_spec()`.
is_batched: Whether or not the p... | Flatten individual observations and then flatten the nested structure.
Args:
observations: A flattened NumPy array of shape corresponding to
`observation_spec()` or an `observation_spec()`.
is_batched: Whether or not the provided observation is batched.
Returns:
A concatenated and fl... | _flatten_nested_observations | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self, env: py_environment.PyEnvironment, handle_auto_reset: bool = False
):
"""Initializes a wrapper to add a goal to the observation.
Args:
env: A `py_environment.PyEnvironment` environment to wrap.
handle_auto_reset: When `True` the base class will handle auto_reset of
... | Initializes a wrapper to add a goal to the observation.
Args:
env: A `py_environment.PyEnvironment` environment to wrap.
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
Raises:
ValueError: If environment observation is not a dict
| __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def get_trajectory_with_goal(
self, trajectory: ts.TimeStep, goal: types.NestedArray
) -> ts.TimeStep:
"""Generates a new trajectory assuming the given goal was the actual target.
One example is updating a "distance-to-goal" field in the observation. Note
that relevant state information must be rec... | Generates a new trajectory assuming the given goal was the actual target.
One example is updating a "distance-to-goal" field in the observation. Note
that relevant state information must be recovered or re-calculated from the
given trajectory.
Args:
trajectory: An instance of `TimeStep`.
g... | get_trajectory_with_goal | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def get_goal_from_trajectory(
self, trajectory: ts.TimeStep
) -> types.NestedArray:
"""Extracts the goal from a given trajectory.
Args:
trajectory: An instance of `TimeStep`.
Returns:
Environment specific goal
Raises:
NotImplementedError: function should be implemented in ch... | Extracts the goal from a given trajectory.
Args:
trajectory: An instance of `TimeStep`.
Returns:
Environment specific goal
Raises:
NotImplementedError: function should be implemented in child class.
| get_goal_from_trajectory | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _reset(self, *args, **kwargs):
"""Resets the environment, updating the trajectory with goal."""
trajectory = self._env.reset(*args, **kwargs)
self._goal = self.get_goal_from_trajectory(trajectory)
return self.get_trajectory_with_goal(trajectory, self._goal) | Resets the environment, updating the trajectory with goal. | _reset | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def _step(self, *args, **kwargs):
"""Execute a step in the environment, updating the trajectory with goal."""
trajectory = self._env.step(*args, **kwargs)
return self.get_trajectory_with_goal(trajectory, self._goal) | Execute a step in the environment, updating the trajectory with goal. | _step | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
history_length: int = 3,
include_actions: bool = False,
tile_first_step_obs: bool = False,
handle_auto_reset: bool = False,
):
"""Initializes a HistoryWrapper.
Args:
env: Environment to wrap.
history_length... | Initializes a HistoryWrapper.
Args:
env: Environment to wrap.
history_length: Length of the history to attach.
include_actions: Whether actions should be included in the history.
tile_first_step_obs: If True the observation on reset is tiled to fill the
history.
handle_auto_re... | __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def __init__(
self,
env: py_environment.PyEnvironment,
num_extra_actions: int,
handle_auto_reset: bool = False,
):
"""Initializes an instance of `ExtraDisabledActionsWrapper`.
Args:
env: The environment to wrap.
num_extra_actions: The number of extra actions to add.
... | Initializes an instance of `ExtraDisabledActionsWrapper`.
Args:
env: The environment to wrap.
num_extra_actions: The number of extra actions to add.
handle_auto_reset: When `True` the base class will handle auto_reset of
the Environment.
| __init__ | python | tensorflow/agents | tf_agents/environments/wrappers.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers.py | Apache-2.0 |
def compute_returns(rewards, discounts):
"""Python implementation of computing discounted returns."""
returns = np.zeros(len(rewards))
next_state_return = 0.0
for t in range(len(returns) - 1, -1, -1):
returns[t] = rewards[t] + discounts[t] * next_state_return
next_state_return = ... | Python implementation of computing discounted returns. | compute_returns | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_with_varying_observation_specs(
self, observation_keys, observation_shapes, observation_dtypes
):
"""Vary the observation spec and step the environment."""
obs_spec = collections.OrderedDict()
for idx, key in enumerate(observation_keys):
obs_spec[key] = array_spec.ArraySpec(
... | Vary the observation spec and step the environment. | test_with_varying_observation_specs | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_with_varying_observation_filters(self, observations_to_keep):
"""Vary the observations to save from the environment."""
obs_spec = collections.OrderedDict({
'obs1': array_spec.ArraySpec((1,), np.int32),
'obs2': array_spec.ArraySpec((2,), np.int32),
'obs3': array_spec.ArraySpec((... | Vary the observations to save from the environment. | test_with_varying_observation_filters | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_env_reset(self):
"""Test the observations returned after an environment reset."""
obs_spec = collections.OrderedDict({
'obs1': array_spec.ArraySpec((1,), np.int32),
'obs2': array_spec.ArraySpec((2,), np.int32),
'obs3': array_spec.ArraySpec((3,), np.int32),
})
action_spe... | Test the observations returned after an environment reset. | test_env_reset | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_observations_wrong_spec_for_allowlist(self, observation_spec):
"""Test the Wrapper has ValueError if the observation spec is invalid."""
action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)
env = random_py_environment.RandomPyEnvironment(
observation_spec, action_spec=action_sp... | Test the Wrapper has ValueError if the observation spec is invalid. | test_observations_wrong_spec_for_allowlist | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_observations_unknown_allowlist(self):
"""Test the Wrapper has ValueError if given unknown keys."""
action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)
obs_spec = collections.OrderedDict({
'obs1': array_spec.ArraySpec((1,), np.int32),
'obs2': array_spec.ArraySpec((2,), ... | Test the Wrapper has ValueError if given unknown keys. | test_observations_unknown_allowlist | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_observations_multiple_dtypes(self):
"""Test the Wrapper has ValueError if given unknown keys."""
action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)
obs_spec = collections.OrderedDict({
'obs1': array_spec.ArraySpec((1,), np.int32),
'obs2': array_spec.ArraySpec((2,), np... | Test the Wrapper has ValueError if given unknown keys. | test_observations_multiple_dtypes | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_batch_env(self):
"""Vary the observation spec and step the environment."""
obs_spec = collections.OrderedDict({
'obs1': array_spec.ArraySpec((1,), np.int32),
'obs2': array_spec.ArraySpec((2,), np.int32),
})
action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)
#... | Vary the observation spec and step the environment. | test_batch_env | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def _get_expected_shape(self, observation, observations_to_keep):
"""Gets the expected shape of a flattened observation nest."""
# The expected shape is the sum of observation lengths in the observation
# spec. For a multi-dimensional observation, it is flattened, thus the
# length is the product of it... | Gets the expected shape of a flattened observation nest. | _get_expected_shape | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_with_varying_observation_specs(
self, observation_keys, observation_shapes, observation_dtypes
):
"""Vary the observation spec and step the environment."""
obs_spec = collections.OrderedDict()
for idx, key in enumerate(observation_keys):
obs_spec[key] = array_spec.ArraySpec(
... | Vary the observation spec and step the environment. | test_with_varying_observation_specs | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def test_batch_env(self):
"""Test batched version of the environment."""
obs_spec = collections.OrderedDict({
'obs1': array_spec.ArraySpec((1,), np.int32),
'obs2': array_spec.ArraySpec((2,), np.int32),
})
action_spec = array_spec.BoundedArraySpec((), np.int32, -10, 10)
# Generate a ... | Test batched version of the environment. | test_batch_env | python | tensorflow/agents | tf_agents/environments/wrappers_test.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/wrappers_test.py | Apache-2.0 |
def __init__(self, rng: np.random.RandomState = None, discount=1.0):
"""Initializes TicTacToeEnvironment.
Args:
rng: If a random generator is provided, the opponent will choose a random
empty space. If None is provided, the opponent will choose the first
empty space.
discount: Disco... | Initializes TicTacToeEnvironment.
Args:
rng: If a random generator is provided, the opponent will choose a random
empty space. If None is provided, the opponent will choose the first
empty space.
discount: Discount for reward.
| __init__ | python | tensorflow/agents | tf_agents/environments/examples/tic_tac_toe_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/examples/tic_tac_toe_environment.py | Apache-2.0 |
def _check_states(self, states: np.ndarray):
"""Check if the given states are final and calculate reward.
Args:
states: states of the board.
Returns:
A tuple of (is_final, reward) where is_final means whether the states
are final are not, and reward is the reward for stepping into the st... | Check if the given states are final and calculate reward.
Args:
states: states of the board.
Returns:
A tuple of (is_final, reward) where is_final means whether the states
are final are not, and reward is the reward for stepping into the states
The meaning of reward: 0 = not decided or... | _check_states | python | tensorflow/agents | tf_agents/environments/examples/tic_tac_toe_environment.py | https://github.com/tensorflow/agents/blob/master/tf_agents/environments/examples/tic_tac_toe_environment.py | Apache-2.0 |
def compute(metrics, environment, policy, num_episodes=1):
"""Compute metrics using `policy` on the `environment`.
Args:
metrics: List of metrics to compute.
environment: py_environment instance.
policy: py_policy instance used to step the environment. A tf_policy can be
used in_eager_mode.
n... | Compute metrics using `policy` on the `environment`.
Args:
metrics: List of metrics to compute.
environment: py_environment instance.
policy: py_policy instance used to step the environment. A tf_policy can be
used in_eager_mode.
num_episodes: Number of episodes to compute the metrics over.
... | compute | python | tensorflow/agents | tf_agents/eval/metric_utils.py | https://github.com/tensorflow/agents/blob/master/tf_agents/eval/metric_utils.py | Apache-2.0 |
def compute_summaries(
metrics,
environment,
policy,
num_episodes=1,
global_step=None,
tf_summaries=True,
log=False,
callback=None,
):
"""Compute metrics using `policy` on the `environment` and logs summaries.
Args:
metrics: List of metrics to compute.
environment: py_enviro... | Compute metrics using `policy` on the `environment` and logs summaries.
Args:
metrics: List of metrics to compute.
environment: py_environment instance.
policy: py_policy instance used to step the environment. A tf_policy can be
used in_eager_mode.
num_episodes: Number of episodes to compute th... | compute_summaries | python | tensorflow/agents | tf_agents/eval/metric_utils.py | https://github.com/tensorflow/agents/blob/master/tf_agents/eval/metric_utils.py | Apache-2.0 |
def eager_compute(
metrics,
environment,
policy,
num_episodes=1,
train_step=None,
summary_writer=None,
summary_prefix='',
use_function=True,
):
"""Compute metrics using `policy` on the `environment`.
*NOTE*: Because placeholders are not compatible with Eager mode we can not use
py... | Compute metrics using `policy` on the `environment`.
*NOTE*: Because placeholders are not compatible with Eager mode we can not use
python policies. Because we use tf_policies we need the environment time_steps
to be tensors making it easier to use a tf_env for evaluations. Otherwise this
method mirrors `compu... | eager_compute | python | tensorflow/agents | tf_agents/eval/metric_utils.py | https://github.com/tensorflow/agents/blob/master/tf_agents/eval/metric_utils.py | Apache-2.0 |
def _transform_episode(episode: tf.data.Dataset) -> tf.data.Dataset:
"""Apply reward_shift and action_clipping to RLDS episode.
Args:
episode: An RLDS episode dataset of RLDS steps datasets.
Returns:
An RLDS episode after applying action clipping and reward shift.
"""
def _transform_s... | Apply reward_shift and action_clipping to RLDS episode.
Args:
episode: An RLDS episode dataset of RLDS steps datasets.
Returns:
An RLDS episode after applying action clipping and reward shift.
| _transform_episode | python | tensorflow/agents | tf_agents/examples/cql_sac/kumar20/cql_sac_train_eval.py | https://github.com/tensorflow/agents/blob/master/tf_agents/examples/cql_sac/kumar20/cql_sac_train_eval.py | Apache-2.0 |
def _transform_step(
rlds_step: Dict[str, tf.Tensor]
) -> Dict[str, tf.Tensor]:
"""Apply reward_shift and action_clipping to RLDS step.
Args:
rlds_step: An RLDS step is a dictionary of tensors containing is_first,
is_last, observation, action, reward, is_terminal, and discount... | Apply reward_shift and action_clipping to RLDS step.
Args:
rlds_step: An RLDS step is a dictionary of tensors containing is_first,
is_last, observation, action, reward, is_terminal, and discount.
Returns:
An RLDS step after applying action clipping and reward shift.
| _transform_step | python | tensorflow/agents | tf_agents/examples/cql_sac/kumar20/cql_sac_train_eval.py | https://github.com/tensorflow/agents/blob/master/tf_agents/examples/cql_sac/kumar20/cql_sac_train_eval.py | Apache-2.0 |
def _experience_dataset() -> tf.data.Dataset:
  """Builds the experience dataset by sampling from the Reverb replay buffer.

  Returns:
    A `tf.data.Dataset` yielding batches of fixed-length step sequences,
    with prefetching enabled to overlap data loading with training.
  """
  # Sample batched, fixed-length sequences from the replay buffer; prefetch
  # so the next batch is being prepared while the current one trains.
  sampled = reverb_replay.as_dataset(
      sample_batch_size=batch_size, num_steps=_SEQUENCE_LENGTH
  )
  return sampled.prefetch(data_prefetch)
def create_single_tf_record_dataset(
filename: Text,
load_buffer_size: int = 0,
shuffle_buffer_size: int = 10000,
num_parallel_reads: Optional[int] = None,
decoder: Optional[DecoderFnType] = None,
reward_shift: float = 0.0,
action_clipping: Optional[Tuple[float, float]] = None,
use_traje... | Create a TF dataset for a single TFRecord file.
Args:
filename: Path to a single TFRecord file.
load_buffer_size: Number of bytes in the read buffer. 0 means no buffering.
shuffle_buffer_size: Size of the buffer for shuffling items within a single
TFRecord file.
num_parallel_reads: Optional, nu... | create_single_tf_record_dataset | python | tensorflow/agents | tf_agents/examples/cql_sac/kumar20/data_utils.py | https://github.com/tensorflow/agents/blob/master/tf_agents/examples/cql_sac/kumar20/data_utils.py | Apache-2.0 |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.