diff --git a/.gitattributes b/.gitattributes index 0779e967c79695684a6afa90b9701684b8017d9f..5a8c0dd02789e1b1d5e7a3eff758227040ccb145 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1417,3 +1417,4 @@ evalkit_tf433/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text evalkit_tf433/lib/libasan.so filter=lfs diff=lfs merge=lfs -text evalkit_tf433/lib/python3.10/lib2to3/tests/__pycache__/test_fixers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text deepseek/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so b/deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..530567dae19aad9248eab5c8013d7d93cf287d3c --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fa898fa09439b50c1f316bc36c06c60fbb25269b653de81f01066339f189ea0 +size 322128 diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e68a42b80d9543ec33fb7bbe36b636ec4de59bc1 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_context.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_context.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0e7573208f8b1a47a223b1710590b106d9a71018 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_context.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_runner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfd10ac18bd9360f645985d584c28adf5ac8ea37 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/env_runner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/multi_agent_env_runner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/multi_agent_env_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcef1d6449dcad2d8decb1a62e23d5cd5aefccdf Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/multi_agent_env_runner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_client.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fc3703350d23072cc05a7c2bde1d48c40b6c969 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_client.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_server_input.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_server_input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d0cb32398eb3e293cb746f3de2bd2552f381e42 Binary files /dev/null and 
b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/policy_server_input.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/single_agent_env_runner.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/single_agent_env_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af82d504e3448456b39256987eed42af513edcc4 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/__pycache__/single_agent_env_runner.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/apis/__pycache__/__init__.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/apis/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f8ca5f783c0200794bbaf375985d94103af7d17 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/apis/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/env_runner.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/env_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..af3f1a11cdac40509a281e3aac6e31cc338835fd --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/env/env_runner.py @@ -0,0 +1,168 @@ +import abc +import logging +from typing import Any, Dict, Tuple, TYPE_CHECKING + +import gymnasium as gym +import tree # pip install dm_tree + +from ray.rllib.utils.actor_manager import FaultAwareApply +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.torch_utils import convert_to_torch_tensor +from ray.rllib.utils.typing import TensorType +from ray.util.annotations import PublicAPI + +if TYPE_CHECKING: + from ray.rllib.algorithms.algorithm_config import AlgorithmConfig + +logger = logging.getLogger("ray.rllib") + +tf1, tf, _ = try_import_tf() + +ENV_RESET_FAILURE = 
"env_reset_failure" +ENV_STEP_FAILURE = "env_step_failure" + + +# TODO (sven): As soon as RolloutWorker is no longer supported, make this base class +# a Checkpointable. Currently, only some of its subclasses are Checkpointables. +@PublicAPI(stability="alpha") +class EnvRunner(FaultAwareApply, metaclass=abc.ABCMeta): + """Base class for distributed RL-style data collection from an environment. + + The EnvRunner API's core functionalities can be summarized as: + - Gets configured via passing a AlgorithmConfig object to the constructor. + Normally, subclasses of EnvRunner then construct their own environment (possibly + vectorized) copies and RLModules/Policies and use the latter to step through the + environment in order to collect training data. + - Clients of EnvRunner can use the `sample()` method to collect data for training + from the environment(s). + - EnvRunner offers parallelism via creating n remote Ray Actors based on this class. + Use `ray.remote([resources])(EnvRunner)` method to create the corresponding Ray + remote class. Then instantiate n Actors using the Ray `[ctor].remote(...)` syntax. + - EnvRunner clients can get information about the server/node on which the + individual Actors are running. + """ + + def __init__(self, *, config: "AlgorithmConfig", **kwargs): + """Initializes an EnvRunner instance. + + Args: + config: The AlgorithmConfig to use to setup this EnvRunner. + **kwargs: Forward compatibility kwargs. + """ + self.config = config.copy(copy_frozen=False) + self.env = None + + super().__init__(**kwargs) + + # This eager check is necessary for certain all-framework tests + # that use tf's eager_mode() context generator. + if ( + tf1 + and (self.config.framework_str == "tf2" or config.enable_tf1_exec_eagerly) + and not tf1.executing_eagerly() + ): + tf1.enable_eager_execution() + + @abc.abstractmethod + def assert_healthy(self): + """Checks that self.__init__() has been completed properly. 
+ + Useful in case an `EnvRunner` is run as @ray.remote (Actor) and the owner + would like to make sure the Ray Actor has been properly initialized. + + Raises: + AssertionError: If the EnvRunner Actor has NOT been properly initialized. + """ + + # TODO: Make this an abstract method that must be implemented. + def make_env(self): + """Creates the RL environment for this EnvRunner and assigns it to `self.env`. + + Note that users should be able to change the EnvRunner's config (e.g. change + `self.config.env_config`) and then call this method to create new environments + with the updated configuration. + It should also be called after a failure of an earlier env in order to clean up + the existing env (for example `close()` it), re-create a new one, and then + continue sampling with that new env. + """ + pass + + @abc.abstractmethod + def sample(self, **kwargs) -> Any: + """Returns experiences (of any form) sampled from this EnvRunner. + + The exact nature and size of collected data are defined via the EnvRunner's + config and may be overridden by the given arguments. + + Args: + **kwargs: Forward compatibility kwargs. + + Returns: + The collected experience in any form. + """ + + @abc.abstractmethod + def get_spaces(self) -> Dict[str, Tuple[gym.Space, gym.Space]]: + """Returns a dict mapping ModuleIDs to 2-tuples of obs- and action space.""" + + def stop(self) -> None: + """Releases all resources used by this EnvRunner. + + For example, when using a gym.Env in this EnvRunner, you should make sure + that its `close()` method is called. + """ + pass + + def __del__(self) -> None: + """If this Actor is deleted, clears all resources used by it.""" + pass + + def _try_env_reset(self): + """Tries resetting the env and - if an error orrurs - handles it gracefully.""" + # Try to reset. + try: + obs, infos = self.env.reset() + # Everything ok -> return. + return obs, infos + # Error. 
+ except Exception as e: + # If user wants to simply restart the env -> recreate env and try again + # (calling this method recursively until success). + if self.config.restart_failed_sub_environments: + logger.exception( + "Resetting the env resulted in an error! The original error " + f"is: {e.args[0]}" + ) + # Recreate the env and simply try again. + self.make_env() + return self._try_env_reset() + else: + raise e + + def _try_env_step(self, actions): + """Tries stepping the env and - if an error orrurs - handles it gracefully.""" + try: + results = self.env.step(actions) + return results + except Exception as e: + if self.config.restart_failed_sub_environments: + logger.exception( + "Stepping the env resulted in an error! The original error " + f"is: {e.args[0]}" + ) + # Recreate the env. + self.make_env() + # And return that the stepping failed. The caller will then handle + # specific cleanup operations (for example discarding thus-far collected + # data and repeating the step attempt). 
+ return ENV_STEP_FAILURE + else: + raise e + + def _convert_to_tensor(self, struct) -> TensorType: + """Converts structs to a framework-specific tensor.""" + + if self.config.framework_str == "torch": + return convert_to_torch_tensor(struct) + else: + return tree.map_structure(tf.convert_to_tensor, struct) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/external_env.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/external_env.py new file mode 100644 index 0000000000000000000000000000000000000000..41eb89d6c471571beca90b9659d262a5283e4519 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/env/external_env.py @@ -0,0 +1,481 @@ +import gymnasium as gym +import queue +import threading +import uuid +from typing import Callable, Tuple, Optional, TYPE_CHECKING + +from ray.rllib.env.base_env import BaseEnv +from ray.rllib.utils.annotations import override, OldAPIStack +from ray.rllib.utils.typing import ( + EnvActionType, + EnvInfoDict, + EnvObsType, + EnvType, + MultiEnvDict, +) +from ray.rllib.utils.deprecation import deprecation_warning + +if TYPE_CHECKING: + from ray.rllib.models.preprocessors import Preprocessor + + +@OldAPIStack +class ExternalEnv(threading.Thread): + """An environment that interfaces with external agents. + + Unlike simulator envs, control is inverted: The environment queries the + policy to obtain actions and in return logs observations and rewards for + training. This is in contrast to gym.Env, where the algorithm drives the + simulation through env.step() calls. + + You can use ExternalEnv as the backend for policy serving (by serving HTTP + requests in the run loop), for ingesting offline logs data (by reading + offline transitions in the run loop), or other custom use cases not easily + expressed through gym.Env. + + ExternalEnv supports both on-policy actions (through self.get_action()), + and off-policy actions (through self.log_action()). 
+ + This env is thread-safe, but individual episodes must be executed serially. + + .. testcode:: + :skipif: True + + from ray.tune import register_env + from ray.rllib.algorithms.dqn import DQN + YourExternalEnv = ... + register_env("my_env", lambda config: YourExternalEnv(config)) + algo = DQN(env="my_env") + while True: + print(algo.train()) + """ + + def __init__( + self, + action_space: gym.Space, + observation_space: gym.Space, + max_concurrent: int = None, + ): + """Initializes an ExternalEnv instance. + + Args: + action_space: Action space of the env. + observation_space: Observation space of the env. + """ + + threading.Thread.__init__(self) + + self.daemon = True + self.action_space = action_space + self.observation_space = observation_space + self._episodes = {} + self._finished = set() + self._results_avail_condition = threading.Condition() + if max_concurrent is not None: + deprecation_warning( + "The `max_concurrent` argument has been deprecated. Please configure" + "the number of episodes using the `rollout_fragment_length` and" + "`batch_mode` arguments. Please raise an issue on the Ray Github if " + "these arguments do not support your expected use case for ExternalEnv", + error=True, + ) + + def run(self): + """Override this to implement the run loop. + + Your loop should continuously: + 1. Call self.start_episode(episode_id) + 2. Call self.[get|log]_action(episode_id, obs, [action]?) + 3. Call self.log_returns(episode_id, reward) + 4. Call self.end_episode(episode_id, obs) + 5. Wait if nothing to do. + + Multiple episodes may be started at the same time. + """ + raise NotImplementedError + + def start_episode( + self, episode_id: Optional[str] = None, training_enabled: bool = True + ) -> str: + """Record the start of an episode. + + Args: + episode_id: Unique string id for the episode or + None for it to be auto-assigned and returned. + training_enabled: Whether to use experiences for this + episode to improve the policy. 
+ + Returns: + Unique string id for the episode. + """ + + if episode_id is None: + episode_id = uuid.uuid4().hex + + if episode_id in self._finished: + raise ValueError("Episode {} has already completed.".format(episode_id)) + + if episode_id in self._episodes: + raise ValueError("Episode {} is already started".format(episode_id)) + + self._episodes[episode_id] = _ExternalEnvEpisode( + episode_id, self._results_avail_condition, training_enabled + ) + + return episode_id + + def get_action(self, episode_id: str, observation: EnvObsType) -> EnvActionType: + """Record an observation and get the on-policy action. + + Args: + episode_id: Episode id returned from start_episode(). + observation: Current environment observation. + + Returns: + Action from the env action space. + """ + + episode = self._get(episode_id) + return episode.wait_for_action(observation) + + def log_action( + self, episode_id: str, observation: EnvObsType, action: EnvActionType + ) -> None: + """Record an observation and (off-policy) action taken. + + Args: + episode_id: Episode id returned from start_episode(). + observation: Current environment observation. + action: Action for the observation. + """ + + episode = self._get(episode_id) + episode.log_action(observation, action) + + def log_returns( + self, episode_id: str, reward: float, info: Optional[EnvInfoDict] = None + ) -> None: + """Records returns (rewards and infos) from the environment. + + The reward will be attributed to the previous action taken by the + episode. Rewards accumulate until the next action. If no reward is + logged before the next action, a reward of 0.0 is assumed. + + Args: + episode_id: Episode id returned from start_episode(). + reward: Reward from the environment. + info: Optional info dict. 
+ """ + + episode = self._get(episode_id) + episode.cur_reward += reward + + if info: + episode.cur_info = info or {} + + def end_episode(self, episode_id: str, observation: EnvObsType) -> None: + """Records the end of an episode. + + Args: + episode_id: Episode id returned from start_episode(). + observation: Current environment observation. + """ + + episode = self._get(episode_id) + self._finished.add(episode.episode_id) + episode.done(observation) + + def _get(self, episode_id: str) -> "_ExternalEnvEpisode": + """Get a started episode by its ID or raise an error.""" + + if episode_id in self._finished: + raise ValueError("Episode {} has already completed.".format(episode_id)) + + if episode_id not in self._episodes: + raise ValueError("Episode {} not found.".format(episode_id)) + + return self._episodes[episode_id] + + def to_base_env( + self, + make_env: Optional[Callable[[int], EnvType]] = None, + num_envs: int = 1, + remote_envs: bool = False, + remote_env_batch_wait_ms: int = 0, + restart_failed_sub_environments: bool = False, + ) -> "BaseEnv": + """Converts an RLlib MultiAgentEnv into a BaseEnv object. + + The resulting BaseEnv is always vectorized (contains n + sub-environments) to support batched forward passes, where n may + also be 1. BaseEnv also supports async execution via the `poll` and + `send_actions` methods and thus supports external simulators. + + Args: + make_env: A callable taking an int as input (which indicates + the number of individual sub-environments within the final + vectorized BaseEnv) and returning one individual + sub-environment. + num_envs: The number of sub-environments to create in the + resulting (vectorized) BaseEnv. The already existing `env` + will be one of the `num_envs`. + remote_envs: Whether each sub-env should be a @ray.remote + actor. You can set this behavior in your config via the + `remote_worker_envs=True` option. 
+ remote_env_batch_wait_ms: The wait time (in ms) to poll remote + sub-environments for, if applicable. Only used if + `remote_envs` is True. + + Returns: + The resulting BaseEnv object. + """ + if num_envs != 1: + raise ValueError( + "External(MultiAgent)Env does not currently support " + "num_envs > 1. One way of solving this would be to " + "treat your Env as a MultiAgentEnv hosting only one " + "type of agent but with several copies." + ) + env = ExternalEnvWrapper(self) + + return env + + +@OldAPIStack +class _ExternalEnvEpisode: + """Tracked state for each active episode.""" + + def __init__( + self, + episode_id: str, + results_avail_condition: threading.Condition, + training_enabled: bool, + multiagent: bool = False, + ): + self.episode_id = episode_id + self.results_avail_condition = results_avail_condition + self.training_enabled = training_enabled + self.multiagent = multiagent + self.data_queue = queue.Queue() + self.action_queue = queue.Queue() + if multiagent: + self.new_observation_dict = None + self.new_action_dict = None + self.cur_reward_dict = {} + self.cur_terminated_dict = {"__all__": False} + self.cur_truncated_dict = {"__all__": False} + self.cur_info_dict = {} + else: + self.new_observation = None + self.new_action = None + self.cur_reward = 0.0 + self.cur_terminated = False + self.cur_truncated = False + self.cur_info = {} + + def get_data(self): + if self.data_queue.empty(): + return None + return self.data_queue.get_nowait() + + def log_action(self, observation, action): + if self.multiagent: + self.new_observation_dict = observation + self.new_action_dict = action + else: + self.new_observation = observation + self.new_action = action + self._send() + self.action_queue.get(True, timeout=60.0) + + def wait_for_action(self, observation): + if self.multiagent: + self.new_observation_dict = observation + else: + self.new_observation = observation + self._send() + return self.action_queue.get(True, timeout=300.0) + + def done(self, 
observation): + if self.multiagent: + self.new_observation_dict = observation + self.cur_terminated_dict = {"__all__": True} + # TODO(sven): External env API does not currently support truncated, + # but we should deprecate external Env anyways in favor of a client-only + # approach. + self.cur_truncated_dict = {"__all__": False} + else: + self.new_observation = observation + self.cur_terminated = True + self.cur_truncated = False + self._send() + + def _send(self): + if self.multiagent: + if not self.training_enabled: + for agent_id in self.cur_info_dict: + self.cur_info_dict[agent_id]["training_enabled"] = False + item = { + "obs": self.new_observation_dict, + "reward": self.cur_reward_dict, + "terminated": self.cur_terminated_dict, + "truncated": self.cur_truncated_dict, + "info": self.cur_info_dict, + } + if self.new_action_dict is not None: + item["off_policy_action"] = self.new_action_dict + self.new_observation_dict = None + self.new_action_dict = None + self.cur_reward_dict = {} + else: + item = { + "obs": self.new_observation, + "reward": self.cur_reward, + "terminated": self.cur_terminated, + "truncated": self.cur_truncated, + "info": self.cur_info, + } + if self.new_action is not None: + item["off_policy_action"] = self.new_action + self.new_observation = None + self.new_action = None + self.cur_reward = 0.0 + if not self.training_enabled: + item["info"]["training_enabled"] = False + + with self.results_avail_condition: + self.data_queue.put_nowait(item) + self.results_avail_condition.notify() + + +@OldAPIStack +class ExternalEnvWrapper(BaseEnv): + """Internal adapter of ExternalEnv to BaseEnv.""" + + def __init__( + self, external_env: "ExternalEnv", preprocessor: "Preprocessor" = None + ): + from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv + + self.external_env = external_env + self.prep = preprocessor + self.multiagent = issubclass(type(external_env), ExternalMultiAgentEnv) + self._action_space = external_env.action_space + if 
preprocessor: + self._observation_space = preprocessor.observation_space + else: + self._observation_space = external_env.observation_space + external_env.start() + + @override(BaseEnv) + def poll( + self, + ) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict, MultiEnvDict, MultiEnvDict]: + with self.external_env._results_avail_condition: + results = self._poll() + while len(results[0]) == 0: + self.external_env._results_avail_condition.wait() + results = self._poll() + if not self.external_env.is_alive(): + raise Exception("Serving thread has stopped.") + return results + + @override(BaseEnv) + def send_actions(self, action_dict: MultiEnvDict) -> None: + from ray.rllib.env.base_env import _DUMMY_AGENT_ID + + if self.multiagent: + for env_id, actions in action_dict.items(): + self.external_env._episodes[env_id].action_queue.put(actions) + else: + for env_id, action in action_dict.items(): + self.external_env._episodes[env_id].action_queue.put( + action[_DUMMY_AGENT_ID] + ) + + def _poll( + self, + ) -> Tuple[ + MultiEnvDict, + MultiEnvDict, + MultiEnvDict, + MultiEnvDict, + MultiEnvDict, + MultiEnvDict, + ]: + from ray.rllib.env.base_env import with_dummy_agent_id + + all_obs, all_rewards, all_terminateds, all_truncateds, all_infos = ( + {}, + {}, + {}, + {}, + {}, + ) + off_policy_actions = {} + for eid, episode in self.external_env._episodes.copy().items(): + data = episode.get_data() + cur_terminated = ( + episode.cur_terminated_dict["__all__"] + if self.multiagent + else episode.cur_terminated + ) + cur_truncated = ( + episode.cur_truncated_dict["__all__"] + if self.multiagent + else episode.cur_truncated + ) + if cur_terminated or cur_truncated: + del self.external_env._episodes[eid] + if data: + if self.prep: + all_obs[eid] = self.prep.transform(data["obs"]) + else: + all_obs[eid] = data["obs"] + all_rewards[eid] = data["reward"] + all_terminateds[eid] = data["terminated"] + all_truncateds[eid] = data["truncated"] + all_infos[eid] = data["info"] + if 
"off_policy_action" in data: + off_policy_actions[eid] = data["off_policy_action"] + if self.multiagent: + # Ensure a consistent set of keys + # rely on all_obs having all possible keys for now. + for eid, eid_dict in all_obs.items(): + for agent_id in eid_dict.keys(): + + def fix(d, zero_val): + if agent_id not in d[eid]: + d[eid][agent_id] = zero_val + + fix(all_rewards, 0.0) + fix(all_terminateds, False) + fix(all_truncateds, False) + fix(all_infos, {}) + return ( + all_obs, + all_rewards, + all_terminateds, + all_truncateds, + all_infos, + off_policy_actions, + ) + else: + return ( + with_dummy_agent_id(all_obs), + with_dummy_agent_id(all_rewards), + with_dummy_agent_id(all_terminateds, "__all__"), + with_dummy_agent_id(all_truncateds, "__all__"), + with_dummy_agent_id(all_infos), + with_dummy_agent_id(off_policy_actions), + ) + + @property + @override(BaseEnv) + def observation_space(self) -> gym.spaces.Dict: + return self._observation_space + + @property + @override(BaseEnv) + def action_space(self) -> gym.Space: + return self._action_space diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..09dfbe227e5a6c29bdfa7096758d529ddcb72d55 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__init__.py @@ -0,0 +1,124 @@ +import logging +from typing import Type, Union + +import gymnasium as gym + +from ray.rllib.env.env_context import EnvContext +from ray.rllib.utils.error import ( + ERR_MSG_INVALID_ENV_DESCRIPTOR, + EnvError, +) +from ray.util.annotations import PublicAPI + + +logger = logging.getLogger(__name__) + + +@PublicAPI +def try_import_pyspiel(error: bool = False): + """Tries importing pyspiel and returns the module (or None). + + Args: + error: Whether to raise an error if pyspiel cannot be imported. + + Returns: + The pyspiel module. 
+ + Raises: + ImportError: If error=True and pyspiel is not installed. + """ + try: + import pyspiel + + return pyspiel + except ImportError: + if error: + raise ImportError( + "Could not import pyspiel! Pygame is not a dependency of RLlib " + "and RLlib requires you to install pygame separately: " + "`pip install pygame`." + ) + return None + + +@PublicAPI +def try_import_open_spiel(error: bool = False): + """Tries importing open_spiel and returns the module (or None). + + Args: + error: Whether to raise an error if open_spiel cannot be imported. + + Returns: + The open_spiel module. + + Raises: + ImportError: If error=True and open_spiel is not installed. + """ + try: + import open_spiel + + return open_spiel + except ImportError: + if error: + raise ImportError( + "Could not import open_spiel! open_spiel is not a dependency of RLlib " + "and RLlib requires you to install open_spiel separately: " + "`pip install open_spiel`." + ) + return None + + +def _gym_env_creator( + env_context: EnvContext, + env_descriptor: Union[str, Type[gym.Env]], +) -> gym.Env: + """Tries to create a gym env given an EnvContext object and descriptor. + + Note: This function tries to construct the env from a string descriptor + only using possibly installed RL env packages (such as gym, pybullet_envs, + etc). These packages are no installation requirements for RLlib. In case + you would like to support more such env packages, add the necessary imports + and construction logic below. + + Args: + env_context: The env context object to configure the env. + Note that this is a config dict, plus the properties: + `worker_index`, `vector_index`, and `remote`. + env_descriptor: The env descriptor as a gym-registered string, e.g. CartPole-v1, + ALE/MsPacman-v5, or CartPoleContinuousBulletEnv-v0. + Alternatively, the gym.Env subclass to use. + + Returns: + The actual gym environment object. + + Raises: + gym.error.Error: If the env cannot be constructed. 
+ """ + # Allow for PyBullet or envs to be used as well (via string). This allows + # for doing things like `env=CartPoleContinuousBulletEnv-v0`. + try: + import pybullet_envs + + pybullet_envs.getList() + except (AttributeError, ModuleNotFoundError, ImportError): + pass + + # If env descriptor is a str, starting with "ale_py:ALE/", for now, register all ALE + # envs from ale_py. + if isinstance(env_descriptor, str) and env_descriptor.startswith("ale_py:ALE/"): + import ale_py + + gym.register_envs(ale_py) + + # Try creating a gym env. If this fails we can output a + # decent error message. + try: + # If class provided, call constructor directly. + if isinstance(env_descriptor, type): + env = env_descriptor(env_context) + else: + env = gym.make(env_descriptor, **env_context) + except gym.error.Error: + raise EnvError(ERR_MSG_INVALID_ENV_DESCRIPTOR.format(env_descriptor)) + + return env diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__pycache__/infinite_lookback_buffer.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__pycache__/infinite_lookback_buffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d49d51dbae8776149fee17d0fa860544dfb87f3 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/utils/__pycache__/infinite_lookback_buffer.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/atari_wrappers.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/atari_wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8affc3deb3f0eeb6cad18ebb0abcabd0405a990 
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/atari_wrappers.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_control_wrapper.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_control_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ab4894dad94d41f3193e34124ad9da7cc329e88 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_control_wrapper.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_env_wrapper.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_env_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06aadeb892a2a4c71a9fb0fa9330e302b4bfc12a Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/dm_env_wrapper.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/group_agents_wrapper.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/group_agents_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b2c419af073bfcfc2e083c3dde4e4b568c7addd Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/group_agents_wrapper.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/open_spiel.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/open_spiel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77e3e6e29312f76961628ebfff81f1a175e3f0cf Binary files /dev/null and 
b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/open_spiel.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/unity3d_env.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/unity3d_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b36df1b313aafd67a19d75aa8406cb2b4e7c1f69 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/__pycache__/unity3d_env.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/dm_control_wrapper.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/dm_control_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..4c0a7407b9aec6244a84f2b13c85e8e0b88b6f65 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/dm_control_wrapper.py @@ -0,0 +1,220 @@ +""" +DeepMind Control Suite Wrapper directly sourced from: +https://github.com/denisyarats/dmc2gym + +MIT License + +Copyright (c) 2020 Denis Yarats + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +from gymnasium import core, spaces + +try: + from dm_env import specs +except ImportError: + specs = None +try: + # Suppress MuJoCo warning (dm_control uses absl logging). + import absl.logging + + absl.logging.set_verbosity("error") + from dm_control import suite +except (ImportError, OSError): + suite = None +import numpy as np + +from ray.rllib.utils.annotations import PublicAPI + + +def _spec_to_box(spec): + def extract_min_max(s): + assert s.dtype == np.float64 or s.dtype == np.float32 + dim = np.int_(np.prod(s.shape)) + if type(s) == specs.Array: + bound = np.inf * np.ones(dim, dtype=np.float32) + return -bound, bound + elif type(s) == specs.BoundedArray: + zeros = np.zeros(dim, dtype=np.float32) + return s.minimum + zeros, s.maximum + zeros + + mins, maxs = [], [] + for s in spec: + mn, mx = extract_min_max(s) + mins.append(mn) + maxs.append(mx) + low = np.concatenate(mins, axis=0) + high = np.concatenate(maxs, axis=0) + assert low.shape == high.shape + return spaces.Box(low, high, dtype=np.float32) + + +def _flatten_obs(obs): + obs_pieces = [] + for v in obs.values(): + flat = np.array([v]) if np.isscalar(v) else v.ravel() + obs_pieces.append(flat) + return np.concatenate(obs_pieces, axis=0) + + +@PublicAPI +class DMCEnv(core.Env): + def __init__( + self, + domain_name, + task_name, + task_kwargs=None, + visualize_reward=False, + from_pixels=False, + height=64, + width=64, + camera_id=0, + frame_skip=2, + environment_kwargs=None, + channels_first=True, + preprocess=True, + ): + self._from_pixels = from_pixels + self._height = height + self._width = width + self._camera_id = camera_id + self._frame_skip = frame_skip + self._channels_first = channels_first + self.preprocess = preprocess + + if 
specs is None: + raise RuntimeError( + ( + "The `specs` module from `dm_env` was not imported. Make sure " + "`dm_env` is installed and visible in the current python " + "environment." + ) + ) + if suite is None: + raise RuntimeError( + ( + "The `suite` module from `dm_control` was not imported. Make " + "sure `dm_control` is installed and visible in the current " + "python enviornment." + ) + ) + + # create task + self._env = suite.load( + domain_name=domain_name, + task_name=task_name, + task_kwargs=task_kwargs, + visualize_reward=visualize_reward, + environment_kwargs=environment_kwargs, + ) + + # true and normalized action spaces + self._true_action_space = _spec_to_box([self._env.action_spec()]) + self._norm_action_space = spaces.Box( + low=-1.0, high=1.0, shape=self._true_action_space.shape, dtype=np.float32 + ) + + # create observation space + if from_pixels: + shape = [3, height, width] if channels_first else [height, width, 3] + self._observation_space = spaces.Box( + low=0, high=255, shape=shape, dtype=np.uint8 + ) + if preprocess: + self._observation_space = spaces.Box( + low=-0.5, high=0.5, shape=shape, dtype=np.float32 + ) + else: + self._observation_space = _spec_to_box( + self._env.observation_spec().values() + ) + + self._state_space = _spec_to_box(self._env.observation_spec().values()) + + self.current_state = None + + def __getattr__(self, name): + return getattr(self._env, name) + + def _get_obs(self, time_step): + if self._from_pixels: + obs = self.render( + height=self._height, width=self._width, camera_id=self._camera_id + ) + if self._channels_first: + obs = obs.transpose(2, 0, 1).copy() + if self.preprocess: + obs = obs / 255.0 - 0.5 + else: + obs = _flatten_obs(time_step.observation) + return obs.astype(np.float32) + + def _convert_action(self, action): + action = action.astype(np.float64) + true_delta = self._true_action_space.high - self._true_action_space.low + norm_delta = self._norm_action_space.high - self._norm_action_space.low + 
action = (action - self._norm_action_space.low) / norm_delta + action = action * true_delta + self._true_action_space.low + action = action.astype(np.float32) + return action + + @property + def observation_space(self): + return self._observation_space + + @property + def state_space(self): + return self._state_space + + @property + def action_space(self): + return self._norm_action_space + + def step(self, action): + assert self._norm_action_space.contains(action) + action = self._convert_action(action) + assert self._true_action_space.contains(action) + reward = 0.0 + extra = {"internal_state": self._env.physics.get_state().copy()} + + terminated = truncated = False + for _ in range(self._frame_skip): + time_step = self._env.step(action) + reward += time_step.reward or 0.0 + terminated = False + truncated = time_step.last() + if terminated or truncated: + break + obs = self._get_obs(time_step) + self.current_state = _flatten_obs(time_step.observation) + extra["discount"] = time_step.discount + return obs, reward, terminated, truncated, extra + + def reset(self, *, seed=None, options=None): + time_step = self._env.reset() + self.current_state = _flatten_obs(time_step.observation) + obs = self._get_obs(time_step) + return obs, {} + + def render(self, mode="rgb_array", height=None, width=None, camera_id=0): + assert mode == "rgb_array", "only support for rgb_array mode" + height = height or self._height + width = width or self._width + camera_id = camera_id or self._camera_id + return self._env.physics.render(height=height, width=width, camera_id=camera_id) diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/exception_wrapper.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/exception_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..50f05fd3444930471c80bc493cfd2ad8bf1a0986 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/exception_wrapper.py @@ -0,0 +1,38 @@ +import 
logging +import traceback + +import gymnasium as gym + +logger = logging.getLogger(__name__) + + +class TooManyResetAttemptsException(Exception): + def __init__(self, max_attempts: int): + super().__init__( + f"Reached the maximum number of attempts ({max_attempts}) " + f"to reset an environment." + ) + + +class ResetOnExceptionWrapper(gym.Wrapper): + def __init__(self, env: gym.Env, max_reset_attempts: int = 5): + super().__init__(env) + self.max_reset_attempts = max_reset_attempts + + def reset(self, **kwargs): + attempt = 0 + while attempt < self.max_reset_attempts: + try: + return self.env.reset(**kwargs) + except Exception: + logger.error(traceback.format_exc()) + attempt += 1 + else: + raise TooManyResetAttemptsException(self.max_reset_attempts) + + def step(self, action): + try: + return self.env.step(action) + except Exception: + logger.error(traceback.format_exc()) + return self.reset(), 0.0, False, {"__terminated__": True} diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/unity3d_env.py b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/unity3d_env.py new file mode 100644 index 0000000000000000000000000000000000000000..45f0f910af923e2dc44598bdbee31b9605fc9bd4 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/env/wrappers/unity3d_env.py @@ -0,0 +1,381 @@ +from gymnasium.spaces import Box, MultiDiscrete, Tuple as TupleSpace +import logging +import numpy as np +import random +import time +from typing import Callable, Optional, Tuple + +from ray.rllib.env.multi_agent_env import MultiAgentEnv +from ray.rllib.policy.policy import PolicySpec +from ray.rllib.utils.annotations import PublicAPI +from ray.rllib.utils.typing import MultiAgentDict, PolicyID, AgentID + +logger = logging.getLogger(__name__) + + +@PublicAPI +class Unity3DEnv(MultiAgentEnv): + """A MultiAgentEnv representing a single Unity3D game instance. 
+ + For an example on how to use this Env with a running Unity3D editor + or with a compiled game, see: + `rllib/examples/unity3d_env_local.py` + For an example on how to use it inside a Unity game client, which + connects to an RLlib Policy server, see: + `rllib/examples/envs/external_envs/unity3d_[client|server].py` + + Supports all Unity3D (MLAgents) examples, multi- or single-agent and + gets converted automatically into an ExternalMultiAgentEnv, when used + inside an RLlib PolicyClient for cloud/distributed training of Unity games. + """ + + # Default base port when connecting directly to the Editor + _BASE_PORT_EDITOR = 5004 + # Default base port when connecting to a compiled environment + _BASE_PORT_ENVIRONMENT = 5005 + # The worker_id for each environment instance + _WORKER_ID = 0 + + def __init__( + self, + file_name: str = None, + port: Optional[int] = None, + seed: int = 0, + no_graphics: bool = False, + timeout_wait: int = 300, + episode_horizon: int = 1000, + ): + """Initializes a Unity3DEnv object. + + Args: + file_name (Optional[str]): Name of the Unity game binary. + If None, will assume a locally running Unity3D editor + to be used, instead. + port (Optional[int]): Port number to connect to Unity environment. + seed: A random seed value to use for the Unity3D game. + no_graphics: Whether to run the Unity3D simulator in + no-graphics mode. Default: False. + timeout_wait: Time (in seconds) to wait for connection from + the Unity3D instance. + episode_horizon: A hard horizon to abide to. After at most + this many steps (per-agent episode `step()` calls), the + Unity3D game is reset and will start again (finishing the + multi-agent episode that the game represents). + Note: The game itself may contain its own episode length + limits, which are always obeyed (on top of this value here). 
+ """ + super().__init__() + + if file_name is None: + print( + "No game binary provided, will use a running Unity editor " + "instead.\nMake sure you are pressing the Play (|>) button in " + "your editor to start." + ) + + import mlagents_envs + from mlagents_envs.environment import UnityEnvironment + + # Try connecting to the Unity3D game instance. If a port is blocked + port_ = None + while True: + # Sleep for random time to allow for concurrent startup of many + # environments (num_env_runners >> 1). Otherwise, would lead to port + # conflicts sometimes. + if port_ is not None: + time.sleep(random.randint(1, 10)) + port_ = port or ( + self._BASE_PORT_ENVIRONMENT if file_name else self._BASE_PORT_EDITOR + ) + # cache the worker_id and + # increase it for the next environment + worker_id_ = Unity3DEnv._WORKER_ID if file_name else 0 + Unity3DEnv._WORKER_ID += 1 + try: + self.unity_env = UnityEnvironment( + file_name=file_name, + worker_id=worker_id_, + base_port=port_, + seed=seed, + no_graphics=no_graphics, + timeout_wait=timeout_wait, + ) + print("Created UnityEnvironment for port {}".format(port_ + worker_id_)) + except mlagents_envs.exception.UnityWorkerInUseException: + pass + else: + break + + # ML-Agents API version. + self.api_version = self.unity_env.API_VERSION.split(".") + self.api_version = [int(s) for s in self.api_version] + + # Reset entire env every this number of step calls. + self.episode_horizon = episode_horizon + # Keep track of how many times we have called `step` so far. + self.episode_timesteps = 0 + + def step( + self, action_dict: MultiAgentDict + ) -> Tuple[ + MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict + ]: + """Performs one multi-agent step through the game. + + Args: + action_dict: Multi-agent action dict with: + keys=agent identifier consisting of + [MLagents behavior name, e.g. 
"Goalie?team=1"] + "_" + + [Agent index, a unique MLAgent-assigned index per single agent] + + Returns: + tuple: + - obs: Multi-agent observation dict. + Only those observations for which to get new actions are + returned. + - rewards: Rewards dict matching `obs`. + - dones: Done dict with only an __all__ multi-agent entry in + it. __all__=True, if episode is done for all agents. + - infos: An (empty) info dict. + """ + from mlagents_envs.base_env import ActionTuple + + # Set only the required actions (from the DecisionSteps) in Unity3D. + all_agents = [] + for behavior_name in self.unity_env.behavior_specs: + # New ML-Agents API: Set all agents actions at the same time + # via an ActionTuple. Since API v1.4.0. + if self.api_version[0] > 1 or ( + self.api_version[0] == 1 and self.api_version[1] >= 4 + ): + actions = [] + for agent_id in self.unity_env.get_steps(behavior_name)[0].agent_id: + key = behavior_name + "_{}".format(agent_id) + all_agents.append(key) + actions.append(action_dict[key]) + if actions: + if actions[0].dtype == np.float32: + action_tuple = ActionTuple(continuous=np.array(actions)) + else: + action_tuple = ActionTuple(discrete=np.array(actions)) + self.unity_env.set_actions(behavior_name, action_tuple) + # Old behavior: Do not use an ActionTuple and set each agent's + # action individually. + else: + for agent_id in self.unity_env.get_steps(behavior_name)[ + 0 + ].agent_id_to_index.keys(): + key = behavior_name + "_{}".format(agent_id) + all_agents.append(key) + self.unity_env.set_action_for_agent( + behavior_name, agent_id, action_dict[key] + ) + # Do the step. + self.unity_env.step() + + obs, rewards, terminateds, truncateds, infos = self._get_step_results() + + # Global horizon reached? -> Return __all__ truncated=True, so user + # can reset. Set all agents' individual `truncated` to True as well. 
+ self.episode_timesteps += 1 + if self.episode_timesteps > self.episode_horizon: + return ( + obs, + rewards, + terminateds, + dict({"__all__": True}, **{agent_id: True for agent_id in all_agents}), + infos, + ) + + return obs, rewards, terminateds, truncateds, infos + + def reset( + self, *, seed=None, options=None + ) -> Tuple[MultiAgentDict, MultiAgentDict]: + """Resets the entire Unity3D scene (a single multi-agent episode).""" + self.episode_timesteps = 0 + self.unity_env.reset() + obs, _, _, _, infos = self._get_step_results() + return obs, infos + + def _get_step_results(self): + """Collects those agents' obs/rewards that have to act in next `step`. + + Returns: + Tuple: + obs: Multi-agent observation dict. + Only those observations for which to get new actions are + returned. + rewards: Rewards dict matching `obs`. + dones: Done dict with only an __all__ multi-agent entry in it. + __all__=True, if episode is done for all agents. + infos: An (empty) info dict. + """ + obs = {} + rewards = {} + infos = {} + for behavior_name in self.unity_env.behavior_specs: + decision_steps, terminal_steps = self.unity_env.get_steps(behavior_name) + # Important: Only update those sub-envs that are currently + # available within _env_state. + # Loop through all envs ("agents") and fill in, whatever + # information we have. + for agent_id, idx in decision_steps.agent_id_to_index.items(): + key = behavior_name + "_{}".format(agent_id) + os = tuple(o[idx] for o in decision_steps.obs) + os = os[0] if len(os) == 1 else os + obs[key] = os + rewards[key] = ( + decision_steps.reward[idx] + decision_steps.group_reward[idx] + ) + for agent_id, idx in terminal_steps.agent_id_to_index.items(): + key = behavior_name + "_{}".format(agent_id) + # Only overwrite rewards (last reward in episode), b/c obs + # here is the last obs (which doesn't matter anyways). + # Unless key does not exist in obs. 
+ if key not in obs: + os = tuple(o[idx] for o in terminal_steps.obs) + obs[key] = os = os[0] if len(os) == 1 else os + rewards[key] = ( + terminal_steps.reward[idx] + terminal_steps.group_reward[idx] + ) + + # Only use dones if all agents are done, then we should do a reset. + return obs, rewards, {"__all__": False}, {"__all__": False}, infos + + @staticmethod + def get_policy_configs_for_game( + game_name: str, + ) -> Tuple[dict, Callable[[AgentID], PolicyID]]: + + # The RLlib server must know about the Spaces that the Client will be + # using inside Unity3D, up-front. + obs_spaces = { + # 3DBall. + "3DBall": Box(float("-inf"), float("inf"), (8,)), + # 3DBallHard. + "3DBallHard": Box(float("-inf"), float("inf"), (45,)), + # GridFoodCollector + "GridFoodCollector": Box(float("-inf"), float("inf"), (40, 40, 6)), + # Pyramids. + "Pyramids": TupleSpace( + [ + Box(float("-inf"), float("inf"), (56,)), + Box(float("-inf"), float("inf"), (56,)), + Box(float("-inf"), float("inf"), (56,)), + Box(float("-inf"), float("inf"), (4,)), + ] + ), + # SoccerTwos. + "SoccerPlayer": TupleSpace( + [ + Box(-1.0, 1.0, (264,)), + Box(-1.0, 1.0, (72,)), + ] + ), + # SoccerStrikersVsGoalie. + "Goalie": Box(float("-inf"), float("inf"), (738,)), + "Striker": TupleSpace( + [ + Box(float("-inf"), float("inf"), (231,)), + Box(float("-inf"), float("inf"), (63,)), + ] + ), + # Sorter. + "Sorter": TupleSpace( + [ + Box( + float("-inf"), + float("inf"), + ( + 20, + 23, + ), + ), + Box(float("-inf"), float("inf"), (10,)), + Box(float("-inf"), float("inf"), (8,)), + ] + ), + # Tennis. + "Tennis": Box(float("-inf"), float("inf"), (27,)), + # VisualHallway. + "VisualHallway": Box(float("-inf"), float("inf"), (84, 84, 3)), + # Walker. + "Walker": Box(float("-inf"), float("inf"), (212,)), + # FoodCollector. + "FoodCollector": TupleSpace( + [ + Box(float("-inf"), float("inf"), (49,)), + Box(float("-inf"), float("inf"), (4,)), + ] + ), + } + action_spaces = { + # 3DBall. 
+ "3DBall": Box(-1.0, 1.0, (2,), dtype=np.float32), + # 3DBallHard. + "3DBallHard": Box(-1.0, 1.0, (2,), dtype=np.float32), + # GridFoodCollector. + "GridFoodCollector": MultiDiscrete([3, 3, 3, 2]), + # Pyramids. + "Pyramids": MultiDiscrete([5]), + # SoccerStrikersVsGoalie. + "Goalie": MultiDiscrete([3, 3, 3]), + "Striker": MultiDiscrete([3, 3, 3]), + # SoccerTwos. + "SoccerPlayer": MultiDiscrete([3, 3, 3]), + # Sorter. + "Sorter": MultiDiscrete([3, 3, 3]), + # Tennis. + "Tennis": Box(-1.0, 1.0, (3,)), + # VisualHallway. + "VisualHallway": MultiDiscrete([5]), + # Walker. + "Walker": Box(-1.0, 1.0, (39,)), + # FoodCollector. + "FoodCollector": MultiDiscrete([3, 3, 3, 2]), + } + + # Policies (Unity: "behaviors") and agent-to-policy mapping fns. + if game_name == "SoccerStrikersVsGoalie": + policies = { + "Goalie": PolicySpec( + observation_space=obs_spaces["Goalie"], + action_space=action_spaces["Goalie"], + ), + "Striker": PolicySpec( + observation_space=obs_spaces["Striker"], + action_space=action_spaces["Striker"], + ), + } + + def policy_mapping_fn(agent_id, episode, worker, **kwargs): + return "Striker" if "Striker" in agent_id else "Goalie" + + elif game_name == "SoccerTwos": + policies = { + "PurplePlayer": PolicySpec( + observation_space=obs_spaces["SoccerPlayer"], + action_space=action_spaces["SoccerPlayer"], + ), + "BluePlayer": PolicySpec( + observation_space=obs_spaces["SoccerPlayer"], + action_space=action_spaces["SoccerPlayer"], + ), + } + + def policy_mapping_fn(agent_id, episode, worker, **kwargs): + return "BluePlayer" if "1_" in agent_id else "PurplePlayer" + + else: + policies = { + game_name: PolicySpec( + observation_space=obs_spaces[game_name], + action_space=action_spaces[game_name], + ), + } + + def policy_mapping_fn(agent_id, episode, worker, **kwargs): + return game_name + + return policies, policy_mapping_fn diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/__init__.cpython-310.pyc 
b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3909f4f7f3a806dd8509ebe67843f4a400d2da06 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/__init__.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/cartpole_mass.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/cartpole_mass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1272e6ecb19d05f37c2f6abd070fefca052b8806 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/cartpole_mass.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/coin_game_non_vectorized_env.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/coin_game_non_vectorized_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a929b8b70218f2ea39697c886a5c0ec24b7aab2b Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/coin_game_non_vectorized_env.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/env_with_subprocess.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/env_with_subprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c111de8922c811fb66c4d9baf7d36e05e8961083 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/env_with_subprocess.cpython-310.pyc differ diff --git 
a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/multi_agent.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/multi_agent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f02fb21d4eb2dcc2ff886989b6ad1c1f2790dd2 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/multi_agent.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pendulum_mass.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pendulum_mass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7605fcbf229093b74bf4091fdd0798653eadc762 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pendulum_mass.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pettingzoo_chess.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pettingzoo_chess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c98d93dc34fed990dc31d285fc2ab435aa2c2180 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/pettingzoo_chess.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/random_env.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/random_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e856d12c3082fbde41c1e99bd22a0d3de8776bc Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/random_env.cpython-310.pyc differ diff --git 
a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/repeat_initial_obs_env.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/repeat_initial_obs_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4cd353861da7d8d2b17494fbdc56b7b67355b2a Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/repeat_initial_obs_env.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/two_step_game.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/two_step_game.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab9259d14ab79ebc66225be4b7a9bd5ca2d051d3 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/__pycache__/two_step_game.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/utils/__init__.py b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/envs/classes/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_tune/custom_experiment.py b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_tune/custom_experiment.py new file mode 100644 index 0000000000000000000000000000000000000000..66ce75c11eb62f2d79813b73bb80fdd8954cbc53 --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/examples/ray_tune/custom_experiment.py @@ -0,0 +1,183 @@ +"""Example of a custom Ray Tune experiment wrapping an RLlib Algorithm. + +You should only use such a customized workflow if the following conditions apply: +- You know exactly what you are doing :) +- Configuring an existing RLlib Algorithm (e.g. 
PPO) via its AlgorithmConfig +is not sufficient and doesn't allow you to shape the Algorithm into behaving the way +you'd like. Note that for complex, custom evaluation procedures there are many +AlgorithmConfig options one can use (for more details, see: +https://github.com/ray-project/ray/blob/master/rllib/examples/evaluation/custom_evaluation.py). # noqa +- Subclassing an RLlib Algorithm class and overriding the new class' `training_step` +method is not sufficient and doesn't allow you to define the algorithm's execution +logic the way you'd like. See an example here on how to customize the algorithm's +`training_step()` method: +https://github.com/ray-project/ray/blob/master/rllib/examples/algorithm/custom_training_step_on_and_off_policy_combined.py # noqa + + +How to run this script +---------------------- +`python [script file name].py` + + +Results to expect +----------------- +You should see the following output (at the end of the experiment) in your console: + +╭─────────────────────────────────────────────────────────────────────────────────────── +│ Trial name status iter total time (s) ts +├─────────────────────────────────────────────────────────────────────────────────────── +│ my_experiment_CartPole-v1_77083_00000 TERMINATED 10 36.7799 60000 +╰─────────────────────────────────────────────────────────────────────────────────────── +╭───────────────────────────────────────────────────────╮ +│ reward episode_len_mean episodes_this_iter │ +├───────────────────────────────────────────────────────┤ +│ 254.821 254.821 12 │ +╰───────────────────────────────────────────────────────╯ +evaluation episode returns=[500.0, 500.0, 500.0] + +Note that evaluation results (on the CartPole-v1 env) should be close to perfect +(episode return of ~500.0) as we are acting greedily inside the evaluation procedure. 
+""" +from typing import Dict + +import numpy as np +from ray import train, tune +from ray.rllib.algorithms.ppo import PPOConfig +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.metrics import NUM_ENV_STEPS_SAMPLED_LIFETIME + +torch, _ = try_import_torch() + + +def my_experiment(config: Dict): + + # Extract the number of iterations to run from the config. + train_iterations = config.pop("train-iterations", 2) + eval_episodes_to_do = config.pop("eval-episodes", 1) + + config = ( + PPOConfig() + .update_from_dict(config) + .api_stack(enable_rl_module_and_learner=True) + .environment("CartPole-v1") + ) + + # Train for n iterations with high LR. + config.training(lr=0.001) + algo_high_lr = config.build() + for _ in range(train_iterations): + train_results = algo_high_lr.train() + # Add the phase to the result dict. + train_results["phase"] = 1 + train.report(train_results) + phase_high_lr_time = train_results[NUM_ENV_STEPS_SAMPLED_LIFETIME] + checkpoint_training_high_lr = algo_high_lr.save() + algo_high_lr.stop() + + # Train for n iterations with low LR. + config.training(lr=0.00001) + algo_low_lr = config.build() + # Load state from the high-lr algo into this one. + algo_low_lr.restore(checkpoint_training_high_lr) + for _ in range(train_iterations): + train_results = algo_low_lr.train() + # Add the phase to the result dict. + train_results["phase"] = 2 + # keep time moving forward + train_results[NUM_ENV_STEPS_SAMPLED_LIFETIME] += phase_high_lr_time + train.report(train_results) + + checkpoint_training_low_lr = algo_low_lr.save() + algo_low_lr.stop() + + # After training, run a manual evaluation procedure. + + # Set the number of EnvRunners for collecting training data to 0 (local + # worker only). + config.env_runners(num_env_runners=0) + + eval_algo = config.build() + # Load state from the low-lr algo into this one. 
+ eval_algo.restore(checkpoint_training_low_lr) + # The algo's local worker (SingleAgentEnvRunner) that holds a + # gym.vector.Env object and an RLModule for computing actions. + local_env_runner = eval_algo.env_runner + # Extract the gymnasium env object from the created algo (its local + # SingleAgentEnvRunner worker). Note that the env in this single-agent + # case is a gymnasium vector env and that we get its first sub-env here. + env = local_env_runner.env.unwrapped.envs[0] + + # The local worker (SingleAgentEnvRunner) + rl_module = local_env_runner.module + + # Run a very simple env loop and add up rewards over a single episode. + obs, infos = env.reset() + episode_returns = [] + episode_lengths = [] + sum_rewards = length = 0 + num_episodes = 0 + while num_episodes < eval_episodes_to_do: + # Call the RLModule's `forward_inference()` method to compute an + # action. + rl_module_out = rl_module.forward_inference( + { + "obs": torch.from_numpy(np.expand_dims(obs, 0)), # <- add B=1 + } + ) + action_logits = rl_module_out["action_dist_inputs"][0] # <- remove B=1 + action = np.argmax(action_logits.detach().cpu().numpy()) # act greedily + + # Step the env. + obs, reward, terminated, truncated, info = env.step(action) + + # Acculumate stats and reset the env, if necessary. + sum_rewards += reward + length += 1 + if terminated or truncated: + num_episodes += 1 + episode_returns.append(sum_rewards) + episode_lengths.append(length) + sum_rewards = length = 0 + obs, infos = env.reset() + + # Compile evaluation results. + eval_results = { + "eval_returns": episode_returns, + "eval_episode_lengths": episode_lengths, + } + # Combine the most recent training results with the just collected + # evaluation results. + results = {**train_results, **eval_results} + # Report everything. + train.report(results) + + +if __name__ == "__main__": + base_config = PPOConfig().environment("CartPole-v1").env_runners(num_env_runners=0) + # Convert to a plain dict for Tune. 
Note that this is usually not needed, you can + # pass into the below Tune Tuner any instantiated RLlib AlgorithmConfig object. + # However, for demonstration purposes, we show here how you can add other, arbitrary + # keys to the plain config dict and then pass these keys to your custom experiment + # function. + config_dict = base_config.to_dict() + + # Set a Special flag signalling `my_experiment` how many training steps to + # perform on each: the high learning rate and low learning rate. + config_dict["train-iterations"] = 5 + # Set a Special flag signalling `my_experiment` how many episodes to evaluate for. + config_dict["eval-episodes"] = 3 + + training_function = tune.with_resources( + my_experiment, + resources=base_config.algo_class.default_resource_request(base_config), + ) + + tuner = tune.Tuner( + training_function, + # Pass in your config dict. + param_space=config_dict, + ) + results = tuner.fit() + best_results = results.get_best_result() + + print(f"evaluation episode returns={best_results.metrics['eval_returns']}") diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/models/catalog.py b/deepseek/lib/python3.10/site-packages/ray/rllib/models/catalog.py new file mode 100644 index 0000000000000000000000000000000000000000..bff7ac243e6217037df6086e40330db3ad69c3eb --- /dev/null +++ b/deepseek/lib/python3.10/site-packages/ray/rllib/models/catalog.py @@ -0,0 +1,905 @@ +from functools import partial +import gymnasium as gym +from gymnasium.spaces import Box, Dict, Discrete, MultiDiscrete, Tuple +import logging +import numpy as np +import tree # pip install dm_tree +from typing import List, Optional, Type, Union + +from ray.tune.registry import ( + RLLIB_MODEL, + RLLIB_ACTION_DIST, + _global_registry, +) +from ray.rllib.models.action_dist import ActionDistribution +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.preprocessors import get_preprocessor, Preprocessor +from ray.rllib.models.tf.tf_action_dist import ( + Categorical, + 
Deterministic, + DiagGaussian, + Dirichlet, + MultiActionDistribution, + MultiCategorical, +) +from ray.rllib.models.torch.torch_action_dist import ( + TorchCategorical, + TorchDeterministic, + TorchDirichlet, + TorchDiagGaussian, + TorchMultiActionDistribution, + TorchMultiCategorical, +) +from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI +from ray.rllib.utils.deprecation import ( + DEPRECATED_VALUE, + deprecation_warning, +) +from ray.rllib.utils.error import UnsupportedSpaceException +from ray.rllib.utils.framework import try_import_tf, try_import_torch +from ray.rllib.utils.from_config import from_config +from ray.rllib.utils.spaces.simplex import Simplex +from ray.rllib.utils.spaces.space_utils import flatten_space +from ray.rllib.utils.typing import ModelConfigDict, TensorType + +tf1, tf, tfv = try_import_tf() +torch, _ = try_import_torch() + +logger = logging.getLogger(__name__) + +# fmt: off +# __sphinx_doc_begin__ +MODEL_DEFAULTS: ModelConfigDict = { + "fcnet_hiddens": [256, 256], + "fcnet_activation": "tanh", + "fcnet_weights_initializer": None, + "fcnet_weights_initializer_config": None, + "fcnet_bias_initializer": None, + "fcnet_bias_initializer_config": None, + "conv_filters": None, + "conv_activation": "relu", + "conv_kernel_initializer": None, + "conv_kernel_initializer_config": None, + "conv_bias_initializer": None, + "conv_bias_initializer_config": None, + "conv_transpose_kernel_initializer": None, + "conv_transpose_kernel_initializer_config": None, + "conv_transpose_bias_initializer": None, + "conv_transpose_bias_initializer_config": None, + "post_fcnet_hiddens": [], + "post_fcnet_activation": "relu", + "post_fcnet_weights_initializer": None, + "post_fcnet_weights_initializer_config": None, + "post_fcnet_bias_initializer": None, + "post_fcnet_bias_initializer_config": None, + "free_log_std": False, + "log_std_clip_param": 20.0, + "no_final_linear": False, + "vf_share_layers": True, + "use_lstm": False, + "max_seq_len": 20, + 
"lstm_cell_size": 256, + "lstm_use_prev_action": False, + "lstm_use_prev_reward": False, + "lstm_weights_initializer": None, + "lstm_weights_initializer_config": None, + "lstm_bias_initializer": None, + "lstm_bias_initializer_config": None, + "_time_major": False, + "use_attention": False, + "attention_num_transformer_units": 1, + "attention_dim": 64, + "attention_num_heads": 1, + "attention_head_dim": 32, + "attention_memory_inference": 50, + "attention_memory_training": 50, + "attention_position_wise_mlp_dim": 32, + "attention_init_gru_gate_bias": 2.0, + "attention_use_n_prev_actions": 0, + "attention_use_n_prev_rewards": 0, + "framestack": True, + "dim": 84, + "grayscale": False, + "zero_mean": True, + "custom_model": None, + "custom_model_config": {}, + "custom_action_dist": None, + "custom_preprocessor": None, + "encoder_latent_dim": None, + "always_check_shapes": False, + + # Deprecated keys: + "lstm_use_prev_action_reward": DEPRECATED_VALUE, + "_use_default_native_models": DEPRECATED_VALUE, + "_disable_preprocessor_api": False, + "_disable_action_flattening": False, +} +# __sphinx_doc_end__ +# fmt: on + + +@DeveloperAPI +class ModelCatalog: + """Registry of models, preprocessors, and action distributions for envs. + + .. testcode:: + :skipif: True + + prep = ModelCatalog.get_preprocessor(env) + observation = prep.transform(raw_observation) + + dist_class, dist_dim = ModelCatalog.get_action_dist( + env.action_space, {}) + model = ModelCatalog.get_model_v2( + obs_space, action_space, num_outputs, options) + dist = dist_class(model.outputs, model) + action = dist.sample() + """ + + @staticmethod + @DeveloperAPI + def get_action_dist( + action_space: gym.Space, + config: ModelConfigDict, + dist_type: Optional[Union[str, Type[ActionDistribution]]] = None, + framework: str = "tf", + **kwargs + ) -> (type, int): + """Returns a distribution class and size for the given action space. + + Args: + action_space: Action space of the target gym env. 
+ config (Optional[dict]): Optional model config. + dist_type (Optional[Union[str, Type[ActionDistribution]]]): + Identifier of the action distribution (str) interpreted as a + hint or the actual ActionDistribution class to use. + framework: One of "tf2", "tf", "torch", or "jax". + kwargs: Optional kwargs to pass on to the Distribution's + constructor. + + Returns: + Tuple: + - dist_class (ActionDistribution): Python class of the + distribution. + - dist_dim (int): The size of the input vector to the + distribution. + """ + + dist_cls = None + config = config or MODEL_DEFAULTS + # Custom distribution given. + if config.get("custom_action_dist"): + custom_action_config = config.copy() + action_dist_name = custom_action_config.pop("custom_action_dist") + logger.debug("Using custom action distribution {}".format(action_dist_name)) + dist_cls = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name) + return ModelCatalog._get_multi_action_distribution( + dist_cls, action_space, custom_action_config, framework + ) + + # Dist_type is given directly as a class. + elif ( + type(dist_type) is type + and issubclass(dist_type, ActionDistribution) + and dist_type not in (MultiActionDistribution, TorchMultiActionDistribution) + ): + dist_cls = dist_type + # Box space -> DiagGaussian OR Deterministic. + elif isinstance(action_space, Box): + if action_space.dtype.name.startswith("int"): + low_ = np.min(action_space.low) + high_ = np.max(action_space.high) + dist_cls = ( + TorchMultiCategorical if framework == "torch" else MultiCategorical + ) + num_cats = int(np.prod(action_space.shape)) + return ( + partial( + dist_cls, + input_lens=[high_ - low_ + 1 for _ in range(num_cats)], + action_space=action_space, + ), + num_cats * (high_ - low_ + 1), + ) + else: + if len(action_space.shape) > 1: + raise UnsupportedSpaceException( + "Action space has multiple dimensions " + "{}. 
".format(action_space.shape) + + "Consider reshaping this into a single dimension, " + "using a custom action distribution, " + "using a Tuple action space, or the multi-agent API." + ) + # TODO(sven): Check for bounds and return SquashedNormal, etc.. + if dist_type is None: + return ( + partial( + TorchDiagGaussian if framework == "torch" else DiagGaussian, + action_space=action_space, + ), + DiagGaussian.required_model_output_shape(action_space, config), + ) + elif dist_type == "deterministic": + dist_cls = ( + TorchDeterministic if framework == "torch" else Deterministic + ) + # Discrete Space -> Categorical. + elif isinstance(action_space, Discrete): + if framework == "torch": + dist_cls = TorchCategorical + elif framework == "jax": + from ray.rllib.models.jax.jax_action_dist import JAXCategorical + + dist_cls = JAXCategorical + else: + dist_cls = Categorical + # Tuple/Dict Spaces -> MultiAction. + elif dist_type in ( + MultiActionDistribution, + TorchMultiActionDistribution, + ) or isinstance(action_space, (Tuple, Dict)): + return ModelCatalog._get_multi_action_distribution( + ( + MultiActionDistribution + if framework == "tf" + else TorchMultiActionDistribution + ), + action_space, + config, + framework, + ) + # Simplex -> Dirichlet. + elif isinstance(action_space, Simplex): + dist_cls = TorchDirichlet if framework == "torch" else Dirichlet + # MultiDiscrete -> MultiCategorical. + elif isinstance(action_space, MultiDiscrete): + dist_cls = ( + TorchMultiCategorical if framework == "torch" else MultiCategorical + ) + return partial(dist_cls, input_lens=action_space.nvec), int( + sum(action_space.nvec) + ) + # Unknown type -> Error. 
+ else: + raise NotImplementedError( + "Unsupported args: {} {}".format(action_space, dist_type) + ) + + return dist_cls, int(dist_cls.required_model_output_shape(action_space, config)) + + @staticmethod + @DeveloperAPI + def get_action_shape( + action_space: gym.Space, framework: str = "tf" + ) -> (np.dtype, List[int]): + """Returns action tensor dtype and shape for the action space. + + Args: + action_space: Action space of the target gym env. + framework: The framework identifier. One of "tf" or "torch". + + Returns: + (dtype, shape): Dtype and shape of the actions tensor. + """ + dl_lib = torch if framework == "torch" else tf + if isinstance(action_space, Discrete): + return action_space.dtype, (None,) + elif isinstance(action_space, (Box, Simplex)): + if np.issubdtype(action_space.dtype, np.floating): + return dl_lib.float32, (None,) + action_space.shape + elif np.issubdtype(action_space.dtype, np.integer): + return dl_lib.int32, (None,) + action_space.shape + else: + raise ValueError("RLlib doesn't support non int or float box spaces") + elif isinstance(action_space, MultiDiscrete): + return action_space.dtype, (None,) + action_space.shape + elif isinstance(action_space, (Tuple, Dict)): + flat_action_space = flatten_space(action_space) + size = 0 + all_discrete = True + for i in range(len(flat_action_space)): + if isinstance(flat_action_space[i], Discrete): + size += 1 + else: + all_discrete = False + size += np.prod(flat_action_space[i].shape) + size = int(size) + return dl_lib.int32 if all_discrete else dl_lib.float32, (None, size) + else: + raise NotImplementedError( + "Action space {} not supported".format(action_space) + ) + + @staticmethod + @DeveloperAPI + def get_action_placeholder( + action_space: gym.Space, name: str = "action" + ) -> TensorType: + """Returns an action placeholder consistent with the action space + + Args: + action_space: Action space of the target gym env. + name: An optional string to name the placeholder by. + Default: "action". 
+ + Returns: + action_placeholder: A placeholder for the actions + """ + dtype, shape = ModelCatalog.get_action_shape(action_space, framework="tf") + + return tf1.placeholder(dtype, shape=shape, name=name) + + @staticmethod + @DeveloperAPI + def get_model_v2( + obs_space: gym.Space, + action_space: gym.Space, + num_outputs: int, + model_config: ModelConfigDict, + framework: str = "tf", + name: str = "default_model", + model_interface: type = None, + default_model: type = None, + **model_kwargs + ) -> ModelV2: + """Returns a suitable model compatible with given spaces and output. + + Args: + obs_space: Observation space of the target gym env. This + may have an `original_space` attribute that specifies how to + unflatten the tensor into a ragged tensor. + action_space: Action space of the target gym env. + num_outputs: The size of the output vector of the model. + model_config: The "model" sub-config dict + within the Algorithm's config dict. + framework: One of "tf2", "tf", "torch", or "jax". + name: Name (scope) for the model. + model_interface: Interface required for the model + default_model: Override the default class for the model. This + only has an effect when not using a custom model + model_kwargs: Args to pass to the ModelV2 constructor + + Returns: + model (ModelV2): Model to use for the policy. + """ + + # Validate the given config dict. + ModelCatalog._validate_config( + config=model_config, action_space=action_space, framework=framework + ) + + if model_config.get("custom_model"): + # Allow model kwargs to be overridden / augmented by + # custom_model_config. + customized_model_kwargs = dict( + model_kwargs, **model_config.get("custom_model_config", {}) + ) + + if isinstance(model_config["custom_model"], type): + model_cls = model_config["custom_model"] + elif ( + isinstance(model_config["custom_model"], str) + and "." 
in model_config["custom_model"] + ): + return from_config( + cls=model_config["custom_model"], + obs_space=obs_space, + action_space=action_space, + num_outputs=num_outputs, + model_config=customized_model_kwargs, + name=name, + ) + else: + model_cls = _global_registry.get( + RLLIB_MODEL, model_config["custom_model"] + ) + + # Only allow ModelV2 or native keras Models. + if not issubclass(model_cls, ModelV2): + if framework not in ["tf", "tf2"] or not issubclass( + model_cls, tf.keras.Model + ): + raise ValueError( + "`model_cls` must be a ModelV2 sub-class, but is" + " {}!".format(model_cls) + ) + + logger.info("Wrapping {} as {}".format(model_cls, model_interface)) + model_cls = ModelCatalog._wrap_if_needed(model_cls, model_interface) + + if framework in ["tf2", "tf"]: + # Try wrapping custom model with LSTM/attention, if required. + if model_config.get("use_lstm") or model_config.get("use_attention"): + from ray.rllib.models.tf.attention_net import ( + AttentionWrapper, + ) + from ray.rllib.models.tf.recurrent_net import ( + LSTMWrapper, + ) + + wrapped_cls = model_cls + forward = wrapped_cls.forward + model_cls = ModelCatalog._wrap_if_needed( + wrapped_cls, + LSTMWrapper + if model_config.get("use_lstm") + else AttentionWrapper, + ) + model_cls._wrapped_forward = forward + + # Obsolete: Track and warn if vars were created but not + # registered. Only still do this, if users do register their + # variables. If not (which they shouldn't), don't check here. 
+ created = set() + + def track_var_creation(next_creator, **kw): + v = next_creator(**kw) + created.add(v.ref()) + return v + + with tf.variable_creator_scope(track_var_creation): + if issubclass(model_cls, tf.keras.Model): + instance = model_cls( + input_space=obs_space, + action_space=action_space, + num_outputs=num_outputs, + name=name, + **customized_model_kwargs, + ) + else: + # Try calling with kwargs first (custom ModelV2 should + # accept these as kwargs, not get them from + # config["custom_model_config"] anymore). + try: + instance = model_cls( + obs_space, + action_space, + num_outputs, + model_config, + name, + **customized_model_kwargs, + ) + except TypeError as e: + # Keyword error: Try old way w/o kwargs. + if "__init__() got an unexpected " in e.args[0]: + instance = model_cls( + obs_space, + action_space, + num_outputs, + model_config, + name, + **model_kwargs, + ) + logger.warning( + "Custom ModelV2 should accept all custom " + "options as **kwargs, instead of expecting" + " them in config['custom_model_config']!" + ) + # Other error -> re-raise. + else: + raise e + + # User still registered TFModelV2's variables: Check, whether + # ok. + registered = [] + if not isinstance(instance, tf.keras.Model): + registered = set(instance.var_list) + if len(registered) > 0: + not_registered = set() + for var in created: + if var not in registered: + not_registered.add(var) + if not_registered: + raise ValueError( + "It looks like you are still using " + "`{}.register_variables()` to register your " + "model's weights. This is no longer required, but " + "if you are still calling this method at least " + "once, you must make sure to register all created " + "variables properly. The missing variables are {}," + " and you only registered {}. 
" + "Did you forget to call `register_variables()` on " + "some of the variables in question?".format( + instance, not_registered, registered + ) + ) + elif framework == "torch": + # Try wrapping custom model with LSTM/attention, if required. + if model_config.get("use_lstm") or model_config.get("use_attention"): + from ray.rllib.models.torch.attention_net import AttentionWrapper + from ray.rllib.models.torch.recurrent_net import LSTMWrapper + + wrapped_cls = model_cls + forward = wrapped_cls.forward + model_cls = ModelCatalog._wrap_if_needed( + wrapped_cls, + LSTMWrapper + if model_config.get("use_lstm") + else AttentionWrapper, + ) + model_cls._wrapped_forward = forward + + # PyTorch automatically tracks nn.Modules inside the parent + # nn.Module's constructor. + # Try calling with kwargs first (custom ModelV2 should + # accept these as kwargs, not get them from + # config["custom_model_config"] anymore). + try: + instance = model_cls( + obs_space, + action_space, + num_outputs, + model_config, + name, + **customized_model_kwargs, + ) + except TypeError as e: + # Keyword error: Try old way w/o kwargs. + if "__init__() got an unexpected " in e.args[0]: + instance = model_cls( + obs_space, + action_space, + num_outputs, + model_config, + name, + **model_kwargs, + ) + logger.warning( + "Custom ModelV2 should accept all custom " + "options as **kwargs, instead of expecting" + " them in config['custom_model_config']!" + ) + # Other error -> re-raise. + else: + raise e + else: + raise NotImplementedError( + "`framework` must be 'tf2|tf|torch', but is " + "{}!".format(framework) + ) + + return instance + + # Find a default TFModelV2 and wrap with model_interface. + if framework in ["tf", "tf2"]: + v2_class = None + # Try to get a default v2 model. 
+ if not model_config.get("custom_model"): + v2_class = default_model or ModelCatalog._get_v2_model_class( + obs_space, model_config, framework=framework + ) + + if not v2_class: + raise ValueError("ModelV2 class could not be determined!") + + if model_config.get("use_lstm") or model_config.get("use_attention"): + from ray.rllib.models.tf.attention_net import ( + AttentionWrapper, + ) + from ray.rllib.models.tf.recurrent_net import ( + LSTMWrapper, + ) + + wrapped_cls = v2_class + if model_config.get("use_lstm"): + v2_class = ModelCatalog._wrap_if_needed(wrapped_cls, LSTMWrapper) + v2_class._wrapped_forward = wrapped_cls.forward + else: + v2_class = ModelCatalog._wrap_if_needed( + wrapped_cls, AttentionWrapper + ) + v2_class._wrapped_forward = wrapped_cls.forward + + # Wrap in the requested interface. + wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface) + + if issubclass(wrapper, tf.keras.Model): + model = wrapper( + input_space=obs_space, + action_space=action_space, + num_outputs=num_outputs, + name=name, + **dict(model_kwargs, **model_config), + ) + return model + + return wrapper( + obs_space, action_space, num_outputs, model_config, name, **model_kwargs + ) + + # Find a default TorchModelV2 and wrap with model_interface. + elif framework == "torch": + # Try to get a default v2 model. 
+ if not model_config.get("custom_model"): + v2_class = default_model or ModelCatalog._get_v2_model_class( + obs_space, model_config, framework=framework + ) + + if not v2_class: + raise ValueError("ModelV2 class could not be determined!") + + if model_config.get("use_lstm") or model_config.get("use_attention"): + from ray.rllib.models.torch.attention_net import AttentionWrapper + from ray.rllib.models.torch.recurrent_net import LSTMWrapper + + wrapped_cls = v2_class + forward = wrapped_cls.forward + if model_config.get("use_lstm"): + v2_class = ModelCatalog._wrap_if_needed(wrapped_cls, LSTMWrapper) + else: + v2_class = ModelCatalog._wrap_if_needed( + wrapped_cls, AttentionWrapper + ) + + v2_class._wrapped_forward = forward + + # Wrap in the requested interface. + wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface) + return wrapper( + obs_space, action_space, num_outputs, model_config, name, **model_kwargs + ) + + # Find a default JAXModelV2 and wrap with model_interface. + elif framework == "jax": + v2_class = default_model or ModelCatalog._get_v2_model_class( + obs_space, model_config, framework=framework + ) + # Wrap in the requested interface. + wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface) + return wrapper( + obs_space, action_space, num_outputs, model_config, name, **model_kwargs + ) + else: + raise NotImplementedError( + "`framework` must be 'tf2|tf|torch', but is " "{}!".format(framework) + ) + + @staticmethod + @DeveloperAPI + def get_preprocessor( + env: gym.Env, options: Optional[dict] = None, include_multi_binary: bool = False + ) -> Preprocessor: + """Returns a suitable preprocessor for the given env. + + This is a wrapper for get_preprocessor_for_space(). 
+ """ + + return ModelCatalog.get_preprocessor_for_space( + env.observation_space, options, include_multi_binary + ) + + @staticmethod + @DeveloperAPI + def get_preprocessor_for_space( + observation_space: gym.Space, + options: dict = None, + include_multi_binary: bool = False, + ) -> Preprocessor: + """Returns a suitable preprocessor for the given observation space. + + Args: + observation_space: The input observation space. + options: Options to pass to the preprocessor. + include_multi_binary: Whether to include the MultiBinaryPreprocessor in + the possible preprocessors returned by this method. + + Returns: + preprocessor: Preprocessor for the observations. + """ + + options = options or MODEL_DEFAULTS + for k in options.keys(): + if k not in MODEL_DEFAULTS: + raise Exception( + "Unknown config key `{}`, all keys: {}".format( + k, list(MODEL_DEFAULTS) + ) + ) + + cls = get_preprocessor( + observation_space, include_multi_binary=include_multi_binary + ) + prep = cls(observation_space, options) + + if prep is not None: + logger.debug( + "Created preprocessor {}: {} -> {}".format( + prep, observation_space, prep.shape + ) + ) + return prep + + @staticmethod + @PublicAPI + def register_custom_model(model_name: str, model_class: type) -> None: + """Register a custom model class by name. + + The model can be later used by specifying {"custom_model": model_name} + in the model config. + + Args: + model_name: Name to register the model under. + model_class: Python class of the model. + """ + if tf is not None: + if issubclass(model_class, tf.keras.Model): + deprecation_warning(old="register_custom_model", error=False) + _global_registry.register(RLLIB_MODEL, model_name, model_class) + + @staticmethod + @PublicAPI + def register_custom_action_dist( + action_dist_name: str, action_dist_class: type + ) -> None: + """Register a custom action distribution class by name. 
+ + The model can be later used by specifying + {"custom_action_dist": action_dist_name} in the model config. + + Args: + model_name: Name to register the action distribution under. + model_class: Python class of the action distribution. + """ + _global_registry.register( + RLLIB_ACTION_DIST, action_dist_name, action_dist_class + ) + + @staticmethod + def _wrap_if_needed(model_cls: type, model_interface: type) -> type: + if not model_interface or issubclass(model_cls, model_interface): + return model_cls + + assert issubclass(model_cls, ModelV2), model_cls + + class wrapper(model_interface, model_cls): + pass + + name = "{}_as_{}".format(model_cls.__name__, model_interface.__name__) + wrapper.__name__ = name + wrapper.__qualname__ = name + + return wrapper + + @staticmethod + def _get_v2_model_class( + input_space: gym.Space, model_config: ModelConfigDict, framework: str = "tf" + ) -> Type[ModelV2]: + VisionNet = None + ComplexNet = None + + if framework in ["tf2", "tf"]: + from ray.rllib.models.tf.fcnet import ( + FullyConnectedNetwork as FCNet, + ) + from ray.rllib.models.tf.visionnet import ( + VisionNetwork as VisionNet, + ) + from ray.rllib.models.tf.complex_input_net import ( + ComplexInputNetwork as ComplexNet, + ) + elif framework == "torch": + from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as FCNet + from ray.rllib.models.torch.visionnet import VisionNetwork as VisionNet + from ray.rllib.models.torch.complex_input_net import ( + ComplexInputNetwork as ComplexNet, + ) + elif framework == "jax": + from ray.rllib.models.jax.fcnet import FullyConnectedNetwork as FCNet + else: + raise ValueError( + "framework={} not supported in `ModelCatalog._get_v2_model_" + "class`!".format(framework) + ) + + orig_space = ( + input_space + if not hasattr(input_space, "original_space") + else input_space.original_space + ) + + # `input_space` is 3D Box -> VisionNet. 
+ if isinstance(input_space, Box) and len(input_space.shape) == 3: + if framework == "jax": + raise NotImplementedError("No non-FC default net for JAX yet!") + return VisionNet + # `input_space` is 1D Box -> FCNet. + elif ( + isinstance(input_space, Box) + and len(input_space.shape) == 1 + and ( + not isinstance(orig_space, (Dict, Tuple)) + or not any( + isinstance(s, Box) and len(s.shape) >= 2 + for s in flatten_space(orig_space) + ) + ) + ): + return FCNet + # Complex (Dict, Tuple, 2D Box (flatten), Discrete, MultiDiscrete). + else: + if framework == "jax": + raise NotImplementedError("No non-FC default net for JAX yet!") + return ComplexNet + + @staticmethod + def _get_multi_action_distribution(dist_class, action_space, config, framework): + # In case the custom distribution is a child of MultiActionDistr. + # If users want to completely ignore the suggested child + # distributions, they should simply do so in their custom class' + # constructor. + if issubclass( + dist_class, (MultiActionDistribution, TorchMultiActionDistribution) + ): + flat_action_space = flatten_space(action_space) + child_dists_and_in_lens = tree.map_structure( + lambda s: ModelCatalog.get_action_dist(s, config, framework=framework), + flat_action_space, + ) + child_dists = [e[0] for e in child_dists_and_in_lens] + input_lens = [int(e[1]) for e in child_dists_and_in_lens] + return ( + partial( + dist_class, + action_space=action_space, + child_distributions=child_dists, + input_lens=input_lens, + ), + int(sum(input_lens)), + ) + return dist_class, dist_class.required_model_output_shape(action_space, config) + + @staticmethod + def _validate_config( + config: ModelConfigDict, action_space: gym.spaces.Space, framework: str + ) -> None: + """Validates a given model config dict. + + Args: + config: The "model" sub-config dict + within the Algorithm's config dict. + action_space: The action space of the model, whose config are + validated. + framework: One of "jax", "tf2", "tf", or "torch". 
+ + Raises: + ValueError: If something is wrong with the given config. + """ + # Soft-deprecate custom preprocessors. + if config.get("custom_preprocessor") is not None: + deprecation_warning( + old="model.custom_preprocessor", + new="gym.ObservationWrapper around your env or handle complex " + "inputs inside your Model", + error=True, + ) + + if config.get("use_attention") and config.get("use_lstm"): + raise ValueError( + "Only one of `use_lstm` or `use_attention` may be set to True!" + ) + + # For complex action spaces, only allow prev action inputs to + # LSTMs and attention nets iff `_disable_action_flattening=True`. + # TODO: `_disable_action_flattening=True` will be the default in + # the future. + if ( + ( + config.get("lstm_use_prev_action") + or config.get("attention_use_n_prev_actions", 0) > 0 + ) + and not config.get("_disable_action_flattening") + and isinstance(action_space, (Tuple, Dict)) + ): + raise ValueError( + "For your complex action space (Tuple|Dict) and your model's " + "`prev-actions` setup of your model, you must set " + "`_disable_action_flattening=True` in your main config dict!" + ) + + if framework == "jax": + if config.get("use_attention"): + raise ValueError( + "`use_attention` not available for framework=jax so far!" 
"""This is the next version of action distribution base class."""
from typing import Tuple
import gymnasium as gym
import abc

from ray.rllib.utils.annotations import ExperimentalAPI
from ray.rllib.utils.typing import TensorType, Union
from ray.rllib.utils.annotations import override


@ExperimentalAPI
class Distribution(abc.ABC):
    """The base class for distribution over a random variable.

    Examples:

    .. testcode::

        import torch
        from ray.rllib.core.models.configs import MLPHeadConfig
        from ray.rllib.models.torch.torch_distributions import TorchCategorical

        model = MLPHeadConfig(input_dims=[1]).build(framework="torch")

        # Create an action distribution from model logits
        action_logits = model(torch.Tensor([[1]]))
        action_dist = TorchCategorical.from_logits(action_logits)
        action = action_dist.sample()

        # Create another distribution from a dummy Tensor
        action_dist2 = TorchCategorical.from_logits(torch.Tensor([0]))

        # Compute some common metrics
        logp = action_dist.logp(action)
        kl = action_dist.kl(action_dist2)
        entropy = action_dist.entropy()
    """

    @abc.abstractmethod
    def sample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        return_logp: bool = False,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        """Draw a sample from the distribution.

        Args:
            sample_shape: The shape of the sample to draw.
            return_logp: Whether to return the logp of the sampled values.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The sampled values. If return_logp is True, returns a tuple of the
            sampled values and its logp.
        """

    @abc.abstractmethod
    def rsample(
        self,
        *,
        sample_shape: Tuple[int, ...] = None,
        return_logp: bool = False,
        **kwargs,
    ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
        """Draw a re-parameterized sample from the action distribution.

        If this method is implemented, we can take gradients of samples w.r.t. the
        distribution parameters.

        Args:
            sample_shape: The shape of the sample to draw.
            return_logp: Whether to return the logp of the sampled values.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The sampled values. If return_logp is True, returns a tuple of the
            sampled values and its logp.
        """

    @abc.abstractmethod
    def logp(self, value: TensorType, **kwargs) -> TensorType:
        """The log-likelihood of the distribution computed at `value`.

        Args:
            value: The value to compute the log-likelihood at.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The log-likelihood of the value.
        """

    @abc.abstractmethod
    def kl(self, other: "Distribution", **kwargs) -> TensorType:
        """The KL-divergence between two distributions.

        Args:
            other: The other distribution.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The KL-divergence between the two distributions.
        """

    @abc.abstractmethod
    def entropy(self, **kwargs) -> TensorType:
        """The entropy of the distribution.

        Args:
            **kwargs: Forward compatibility placeholder.

        Returns:
            The entropy of the distribution.
        """

    @staticmethod
    @abc.abstractmethod
    def required_input_dim(space: gym.Space, **kwargs) -> int:
        """Returns the required length of an input parameter tensor.

        Args:
            space: The space this distribution will be used for,
                whose shape attributes will be used to determine the required shape
                of the input parameter tensor.
            **kwargs: Forward compatibility placeholder.

        Returns:
            size of the required input vector (minus leading batch dimension).
        """

    @classmethod
    def from_logits(cls, logits: TensorType, **kwargs) -> "Distribution":
        """Creates a Distribution from logits.

        The caller does not need to have knowledge of the distribution class in order
        to create it and sample from it. The passed batched logits vectors might be
        split up and are passed to the distribution class' constructor as kwargs.

        Args:
            logits: The logits to create the distribution from.
            **kwargs: Forward compatibility placeholder.

        Returns:
            The created distribution.

        .. testcode::

            import numpy as np
            from ray.rllib.models.distributions import Distribution

            class Uniform(Distribution):
                def __init__(self, lower, upper):
                    self.lower = lower
                    self.upper = upper

                def sample(self):
                    return self.lower + (self.upper - self.lower) * np.random.rand()

                def logp(self, x):
                    ...

                def kl(self, other):
                    ...

                def entropy(self):
                    ...

                @staticmethod
                def required_input_dim(space):
                    ...

                def rsample(self):
                    ...

                @classmethod
                def from_logits(cls, logits, **kwargs):
                    return Uniform(logits[:, 0], logits[:, 1])

            logits = np.array([[0.0, 1.0], [2.0, 3.0]])
            my_dist = Uniform.from_logits(logits)
            sample = my_dist.sample()
        """
        raise NotImplementedError

    @classmethod
    def get_partial_dist_cls(
        parent_cls: "Distribution", **partial_kwargs
    ) -> "Distribution":
        """Returns a partial child of this Distribution class.

        This is useful if inputs needed to instantiate the Distribution from logits
        are available, but the logits are not. The returned class behaves like
        `parent_cls` with `partial_kwargs` pre-bound to `required_input_dim()` and
        `from_logits()`.

        Args:
            **partial_kwargs: Constructor kwargs to freeze into the partial class.

        Returns:
            A subclass of `parent_cls` with the given kwargs pre-applied.
        """

        class DistributionPartial(parent_cls):
            @staticmethod
            def _merge_kwargs(**kwargs):
                """Checks if keys in kwargs don't clash with partial_kwargs."""
                overlap = set(kwargs) & set(partial_kwargs)
                if overlap:
                    raise ValueError(
                        f"Cannot override the following kwargs: {overlap}.\n"
                        f"This is because they were already set at the time this "
                        f"partial class was defined."
                    )
                merged_kwargs = {**partial_kwargs, **kwargs}
                return merged_kwargs

            @classmethod
            @override(parent_cls)
            def required_input_dim(cls, space: gym.Space, **kwargs) -> int:
                merged_kwargs = cls._merge_kwargs(**kwargs)
                # The pre-bound space must match the one the caller passes in.
                assert space == merged_kwargs["space"]
                return parent_cls.required_input_dim(**merged_kwargs)

            @classmethod
            @override(parent_cls)
            def from_logits(
                cls,
                logits: TensorType,
                **kwargs,
            ) -> "DistributionPartial":
                merged_kwargs = cls._merge_kwargs(**kwargs)
                distribution = parent_cls.from_logits(logits, **merged_kwargs)
                # Replace the class of the returned distribution with this partial
                # This makes it so that we can use type() on this distribution and
                # get back the partial class.
                distribution.__class__ = cls
                return distribution

        # Substitute name of this partial class to match the original class.
        # Bug fix: use `parent_cls.__name__` (e.g. "TorchCategoricalPartial");
        # formatting the class object itself would yield "<class '...'>Partial".
        DistributionPartial.__name__ = f"{parent_cls.__name__}Partial"
        DistributionPartial.__qualname__ = f"{parent_cls.__qualname__}Partial"

        return DistributionPartial

    def to_deterministic(self) -> "Distribution":
        """Returns a deterministic equivalent for this distribution.

        Specifically, the deterministic equivalent for a Categorical distribution is a
        Deterministic distribution that selects the action with maximum logit value.
        Generally, the choice of the deterministic replacement is informed by
        established conventions.
        """
        raise NotImplementedError
+ """ + + def __init__(self, obs_space, action_space, num_outputs, model_config, name): + self.original_space = ( + obs_space.original_space + if hasattr(obs_space, "original_space") + else obs_space + ) + + self.processed_obs_space = ( + self.original_space + if model_config.get("_disable_preprocessor_api") + else obs_space + ) + + nn.Module.__init__(self) + TorchModelV2.__init__( + self, self.original_space, action_space, num_outputs, model_config, name + ) + + self.flattened_input_space = flatten_space(self.original_space) + + # Atari type CNNs or IMPALA type CNNs (with residual layers)? + # self.cnn_type = self.model_config["custom_model_config"].get( + # "conv_type", "atari") + + # Build the CNN(s) given obs_space's image components. + self.cnns = nn.ModuleDict() + self.one_hot = nn.ModuleDict() + self.flatten_dims = {} + self.flatten = nn.ModuleDict() + concat_size = 0 + for i, component in enumerate(self.flattened_input_space): + i = str(i) + # Image space. + if len(component.shape) == 3 and isinstance(component, Box): + config = { + "conv_filters": model_config["conv_filters"] + if "conv_filters" in model_config + else get_filter_config(component.shape), + "conv_activation": model_config.get("conv_activation"), + "post_fcnet_hiddens": [], + } + # if self.cnn_type == "atari": + self.cnns[i] = ModelCatalog.get_model_v2( + component, + action_space, + num_outputs=None, + model_config=config, + framework="torch", + name="cnn_{}".format(i), + ) + # TODO (sven): add IMPALA-style option. + # else: + # cnn = TorchImpalaVisionNet( + # component, + # action_space, + # num_outputs=None, + # model_config=config, + # name="cnn_{}".format(i)) + + concat_size += self.cnns[i].num_outputs + self.add_module("cnn_{}".format(i), self.cnns[i]) + # Discrete|MultiDiscrete inputs -> One-hot encode. 
+ elif isinstance(component, (Discrete, MultiDiscrete)): + if isinstance(component, Discrete): + size = component.n + else: + size = np.sum(component.nvec) + config = { + "fcnet_hiddens": model_config["fcnet_hiddens"], + "fcnet_activation": model_config.get("fcnet_activation"), + "post_fcnet_hiddens": [], + } + self.one_hot[i] = ModelCatalog.get_model_v2( + Box(-1.0, 1.0, (size,), np.float32), + action_space, + num_outputs=None, + model_config=config, + framework="torch", + name="one_hot_{}".format(i), + ) + concat_size += self.one_hot[i].num_outputs + self.add_module("one_hot_{}".format(i), self.one_hot[i]) + # Everything else (1D Box). + else: + size = int(np.prod(component.shape)) + config = { + "fcnet_hiddens": model_config["fcnet_hiddens"], + "fcnet_activation": model_config.get("fcnet_activation"), + "post_fcnet_hiddens": [], + } + self.flatten[i] = ModelCatalog.get_model_v2( + Box(-1.0, 1.0, (size,), np.float32), + action_space, + num_outputs=None, + model_config=config, + framework="torch", + name="flatten_{}".format(i), + ) + self.flatten_dims[i] = size + concat_size += self.flatten[i].num_outputs + self.add_module("flatten_{}".format(i), self.flatten[i]) + + # Optional post-concat FC-stack. + post_fc_stack_config = { + "fcnet_hiddens": model_config.get("post_fcnet_hiddens", []), + "fcnet_activation": model_config.get("post_fcnet_activation", "relu"), + } + self.post_fc_stack = ModelCatalog.get_model_v2( + Box(float("-inf"), float("inf"), shape=(concat_size,), dtype=np.float32), + self.action_space, + None, + post_fc_stack_config, + framework="torch", + name="post_fc_stack", + ) + + # Actions and value heads. + self.logits_layer = None + self.value_layer = None + self._value_out = None + + if num_outputs: + # Action-distribution head. + self.logits_layer = SlimFC( + in_size=self.post_fc_stack.num_outputs, + out_size=num_outputs, + activation_fn=None, + initializer=torch_normc_initializer(0.01), + ) + # Create the value branch model. 
+ self.value_layer = SlimFC( + in_size=self.post_fc_stack.num_outputs, + out_size=1, + activation_fn=None, + initializer=torch_normc_initializer(0.01), + ) + else: + self.num_outputs = concat_size + + @override(ModelV2) + def forward(self, input_dict, state, seq_lens): + if SampleBatch.OBS in input_dict and "obs_flat" in input_dict: + orig_obs = input_dict[SampleBatch.OBS] + else: + orig_obs = restore_original_dimensions( + input_dict[SampleBatch.OBS], self.processed_obs_space, tensorlib="torch" + ) + # Push observations through the different components + # (CNNs, one-hot + FC, etc..). + outs = [] + for i, component in enumerate(tree.flatten(orig_obs)): + i = str(i) + if i in self.cnns: + cnn_out, _ = self.cnns[i](SampleBatch({SampleBatch.OBS: component})) + outs.append(cnn_out) + elif i in self.one_hot: + if component.dtype in [ + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.uint8, + ]: + one_hot_in = { + SampleBatch.OBS: one_hot( + component, self.flattened_input_space[int(i)] + ) + } + else: + one_hot_in = {SampleBatch.OBS: component} + one_hot_out, _ = self.one_hot[i](SampleBatch(one_hot_in)) + outs.append(one_hot_out) + else: + nn_out, _ = self.flatten[i]( + SampleBatch( + { + SampleBatch.OBS: torch.reshape( + component, [-1, self.flatten_dims[i]] + ) + } + ) + ) + outs.append(nn_out) + + # Concat all outputs and the non-image inputs. + out = torch.cat(outs, dim=1) + # Push through (optional) FC-stack (this may be an empty stack). + out, _ = self.post_fc_stack(SampleBatch({SampleBatch.OBS: out})) + + # No logits/value branches. + if self.logits_layer is None: + return out, [] + + # Logits- and value branches. 
+ logits, values = self.logits_layer(out), self.value_layer(out) + self._value_out = torch.reshape(values, [-1]) + return logits, [] + + @override(ModelV2) + def value_function(self): + return self._value_out diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77fc4205882a974db3cbdd9cf6b4fd7fff09e6db Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/convtranspose2d_stack.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59503ac29f0d13b5e16151b30b599649789e776e Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/relative_multi_head_attention.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/relative_multi_head_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b404be2cba5e1e4a94e174db481095be48fc308 Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/relative_multi_head_attention.cpython-310.pyc differ diff --git a/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/multi_head_attention.py b/deepseek/lib/python3.10/site-packages/ray/rllib/models/torch/modules/multi_head_attention.py new file mode 100644 index 
"""
[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar,
      Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017.
      https://arxiv.org/pdf/1706.03762.pdf
"""
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.utils.annotations import OldAPIStack
from ray.rllib.utils.torch_utils import sequence_mask
from ray.rllib.utils.framework import TensorType

torch, nn = try_import_torch()


@OldAPIStack
class MultiHeadAttention(nn.Module):
    """A multi-head attention layer described in [1]."""

    def __init__(
        self, in_dim: int, out_dim: int, num_heads: int, head_dim: int, **kwargs
    ):
        """Initializes a MultiHeadAttention module.

        Args:
            in_dim: Dimension of input.
            out_dim: Dimension of output.
            num_heads: Number of attention heads.
            head_dim: Output dimension of each attention head.
        """
        super().__init__(**kwargs)

        # No bias or non-linearity.
        self._num_heads = num_heads
        self._head_dim = head_dim
        # One fused projection producing queries, keys, and values at once
        # (hence 3 * num_heads * head_dim outputs).
        self._qkv_layer = SlimFC(
            in_size=in_dim, out_size=3 * num_heads * head_dim, use_bias=False
        )

        # Final projection from the concatenated heads back to out_dim.
        self._linear_layer = SlimFC(
            in_size=num_heads * head_dim, out_size=out_dim, use_bias=False
        )

    def forward(self, inputs: TensorType) -> TensorType:
        """Computes causal multi-head attention over `inputs`.

        Args:
            inputs: Input tensor; dim 1 is treated as the time/segment axis.

        Returns:
            The attention output, projected to `out_dim`.
        """
        L = list(inputs.size())[1]  # length of segment
        H = self._num_heads  # number of attention heads
        D = self._head_dim  # attention head dimension

        qkv = self._qkv_layer(inputs)

        queries, keys, values = torch.chunk(input=qkv, chunks=3, dim=-1)
        queries = queries[:, -L:]  # only query based on the segment

        # Split the head dimension out: [batch, L, H, D].
        queries = torch.reshape(queries, [-1, L, H, D])
        keys = torch.reshape(keys, [-1, L, H, D])
        values = torch.reshape(values, [-1, L, H, D])

        # Raw attention scores per head: [batch, L (query), L (key), H],
        # scaled by sqrt(D) as in [1].
        score = torch.einsum("bihd,bjhd->bijh", queries, keys)
        score = score / D**0.5

        # causal mask of the same length as the sequence
        mask = sequence_mask(torch.arange(1, L + 1), dtype=score.dtype)
        mask = mask[None, :, :, None]
        mask = mask.float()

        # Push masked-out positions to ~-1e30 so they get ~zero weight
        # after the softmax over the key axis (dim=2).
        masked_score = score * mask + 1e30 * (mask - 1.0)
        wmat = nn.functional.softmax(masked_score, dim=2)

        # Weighted sum over values, then merge the heads back: [batch, L, H*D].
        out = torch.einsum("bijh,bjhd->bihd", wmat, values)
        shape = list(out.size())[:2] + [H * D]
        out = torch.reshape(out, shape)
        return self._linear_layer(out)
    def __init__(
        self, in_size: int, out_size: int, sigma0: float, activation: str = "relu"
    ):
        """Initializes a NoisyLayer object.

        Args:
            in_size: Input size for Noisy Layer
            out_size: Output size for Noisy Layer
            sigma0: Initialization value for sigma_b (bias noise)
            activation: Non-linear activation for Noisy Layer
        """
        super().__init__()

        self.in_size = in_size
        self.out_size = out_size
        self.sigma0 = sigma0
        # Resolve the activation name to a torch activation class (or None),
        # then instantiate it.
        self.activation = get_activation_fn(activation, framework="torch")
        if self.activation is not None:
            self.activation = self.activation()

        # Trainable noise scale for the weights, initialized uniformly in
        # [-1/sqrt(in_size), 1/sqrt(in_size)].
        sigma_w = nn.Parameter(
            torch.from_numpy(
                np.random.uniform(
                    low=-1.0 / np.sqrt(float(self.in_size)),
                    high=1.0 / np.sqrt(float(self.in_size)),
                    size=[self.in_size, out_size],
                )
            ).float()
        )
        self.register_parameter("sigma_w", sigma_w)
        # Trainable noise scale for the bias, initialized to
        # sigma0 / sqrt(in_size).
        sigma_b = nn.Parameter(
            torch.from_numpy(
                np.full(
                    shape=[out_size], fill_value=sigma0 / np.sqrt(float(self.in_size))
                )
            ).float()
        )
        self.register_parameter("sigma_b", sigma_b)

        # Mean weight matrix, initialized to the constant
        # 6 / sqrt(in_size + out_size).
        w = nn.Parameter(
            torch.from_numpy(
                np.full(
                    shape=[self.in_size, self.out_size],
                    fill_value=6 / np.sqrt(float(in_size) + float(out_size)),
                )
            ).float()
        )
        self.register_parameter("w", w)
        # Mean bias vector, initialized to zeros.
        b = nn.Parameter(torch.from_numpy(np.zeros([out_size])).float())
        self.register_parameter("b", b)

    def forward(self, inputs: TensorType) -> TensorType:
        """Computes `(w + sigma_w * eps_w)^T x + (b + sigma_b * eps_b)`.

        Fresh factorized noise is drawn on every call (see class docstring).

        Args:
            inputs: The input tensor of shape [..., in_size].

        Returns:
            The (optionally activated) noisy layer output.
        """
        # Factorized noise: one noise vector per input dim and one per output
        # dim; the weight noise is their rank-1 outer product.
        epsilon_in = self._f_epsilon(
            torch.normal(
                mean=torch.zeros([self.in_size]), std=torch.ones([self.in_size])
            ).to(inputs.device)
        )
        epsilon_out = self._f_epsilon(
            torch.normal(
                mean=torch.zeros([self.out_size]), std=torch.ones([self.out_size])
            ).to(inputs.device)
        )
        # Outer product: [in_size, 1] x [1, out_size] -> [in_size, out_size].
        epsilon_w = torch.matmul(
            torch.unsqueeze(epsilon_in, -1), other=torch.unsqueeze(epsilon_out, 0)
        )
        epsilon_b = epsilon_out

        action_activation = (
            torch.matmul(inputs, self.w + self.sigma_w * epsilon_w)
            + self.b
            + self.sigma_b * epsilon_b
        )

        if self.activation is not None:
            action_activation = self.activation(action_activation)
        return action_activation

    def _f_epsilon(self, x: TensorType) -> TensorType:
        # Noise-shaping function f(x) = sign(x) * sqrt(|x|) applied to the raw
        # Gaussian samples (standard for factorized noisy layers).
        return torch.sign(x) * torch.pow(torch.abs(x), 0.5)
    def __init__(
        self,
        response_message_cls: Union[proto.Message, google.protobuf.message.Message],
    ):
        """Initializes the iterator's JSON-stream parsing state.

        Args:
            response_message_cls: The message class each complete JSON object
                in the streamed array will be parsed into.
        """
        self._response_message_cls = response_message_cls
        # Contains a list of JSON responses ready to be sent to user.
        self._ready_objs: Deque[str] = deque()
        # Current JSON response being built.
        self._obj = ""
        # Keeps track of the nesting level within a JSON object.
        self._level = 0
        # Keeps track whether HTTP response is currently sending values
        # inside of a string value.
        self._in_string = False
        # Whether an escape symbol "\" was encountered.
        self._escape_next = False

        # Bind the message-class-specific parser as a bound method; raises
        # ValueError for unsupported message classes.
        self._grab = types.MethodType(self._create_grab(), self)

    def _process_chunk(self, chunk: str):
        """Feeds one chunk of the streamed JSON array into the state machine.

        Each complete top-level JSON object found is appended (as a string)
        to ``self._ready_objs``.

        Args:
            chunk: The next piece of the HTTP response body.

        Raises:
            ValueError: If the stream does not start with a JSON array ("[").
        """
        if self._level == 0:
            if chunk[0] != "[":
                raise ValueError(
                    "Can only parse array of JSON objects, instead got %s" % chunk
                )
        for char in chunk:
            if char == "{":
                if self._level == 1:
                    # Level 1 corresponds to the outermost JSON object
                    # (i.e. the one we care about).
                    self._obj = ""
                if not self._in_string:
                    self._level += 1
                self._obj += char
            elif char == "}":
                self._obj += char
                if not self._in_string:
                    self._level -= 1
                # Dropped back to array level -> the object is complete.
                if not self._in_string and self._level == 1:
                    self._ready_objs.append(self._obj)
            elif char == '"':
                # Helps to deal with an escaped quotes inside of a string.
                if not self._escape_next:
                    self._in_string = not self._in_string
                self._obj += char
            elif char in string.whitespace:
                # Whitespace is only significant inside string values.
                if self._in_string:
                    self._obj += char
            elif char == "[":
                if self._level == 0:
                    self._level += 1
                else:
                    self._obj += char
            elif char == "]":
                if self._level == 1:
                    self._level -= 1
                else:
                    self._obj += char
            else:
                self._obj += char
            # A backslash escapes exactly the next character; two in a row
            # cancel out (hence the toggle).
            self._escape_next = not self._escape_next if char == "\\" else False

    def _create_grab(self):
        """Returns a function converting one queued JSON string into a message.

        Returns:
            A one-argument function (taking the iterator instance) that pops
            the oldest ready JSON object and parses it into
            ``response_message_cls``.

        Raises:
            ValueError: If ``response_message_cls`` is neither a proto-plus
                nor a protobuf message class.
        """
        if issubclass(self._response_message_cls, proto.Message):

            def grab(this):
                return this._response_message_cls.from_json(
                    this._ready_objs.popleft(), ignore_unknown_fields=True
                )

            return grab
        elif issubclass(self._response_message_cls, google.protobuf.message.Message):

            def grab(this):
                return Parse(this._ready_objs.popleft(), this._response_message_cls())

            return grab
        else:
            raise ValueError(
                "Response message class must be a subclass of proto.Message or google.protobuf.message.Message."
            )
+ +"""Helpers for providing client information. + +Client information is used to send information about the calling client, +such as the library and Python version, to API services. +""" + +import platform +from typing import Union + +from google.api_core import version as api_core_version + +_PY_VERSION = platform.python_version() +_API_CORE_VERSION = api_core_version.__version__ + +_GRPC_VERSION: Union[str, None] + +try: + import grpc + + _GRPC_VERSION = grpc.__version__ +except ImportError: # pragma: NO COVER + _GRPC_VERSION = None + + +class ClientInfo(object): + """Client information used to generate a user-agent for API calls. + + This user-agent information is sent along with API calls to allow the + receiving service to do analytics on which versions of Python and Google + libraries are being used. + + Args: + python_version (str): The Python interpreter version, for example, + ``'3.9.6'``. + grpc_version (Optional[str]): The gRPC library version. + api_core_version (str): The google-api-core library version. + gapic_version (Optional[str]): The version of gapic-generated client + library, if the library was generated by gapic. + client_library_version (Optional[str]): The version of the client + library, generally used if the client library was not generated + by gapic or if additional functionality was built on top of + a gapic client library. + user_agent (Optional[str]): Prefix to the user agent header. This is + used to supply information such as application name or partner tool. + Recommended format: ``application-or-tool-ID/major.minor.version``. + rest_version (Optional[str]): A string with labeled versions of the + dependencies used for REST transport. 
+ """ + + def __init__( + self, + python_version=_PY_VERSION, + grpc_version=_GRPC_VERSION, + api_core_version=_API_CORE_VERSION, + gapic_version=None, + client_library_version=None, + user_agent=None, + rest_version=None, + ): + self.python_version = python_version + self.grpc_version = grpc_version + self.api_core_version = api_core_version + self.gapic_version = gapic_version + self.client_library_version = client_library_version + self.user_agent = user_agent + self.rest_version = rest_version + + def to_user_agent(self): + """Returns the user-agent string for this client info.""" + + # Note: the order here is important as the internal metrics system + # expects these items to be in specific locations. + ua = "" + + if self.user_agent is not None: + ua += "{user_agent} " + + ua += "gl-python/{python_version} " + + if self.grpc_version is not None: + ua += "grpc/{grpc_version} " + + if self.rest_version is not None: + ua += "rest/{rest_version} " + + ua += "gax/{api_core_version} " + + if self.gapic_version is not None: + ua += "gapic/{gapic_version} " + + if self.client_library_version is not None: + ua += "gccl/{client_library_version} " + + return ua.format(**self.__dict__).strip() diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_logging.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_logging.py new file mode 100644 index 0000000000000000000000000000000000000000..837e3e0c455b4f3dc42c357ac18d379ebaa1e09b --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/client_logging.py @@ -0,0 +1,144 @@ +import logging +import json +import os + +from typing import List, Optional + +_LOGGING_INITIALIZED = False +_BASE_LOGGER_NAME = "google" + +# Fields to be included in the StructuredLogFormatter. +# +# TODO(https://github.com/googleapis/python-api-core/issues/761): Update this list to support additional logging fields. 
_recognized_logging_fields = [
    "httpRequest",
    "rpcName",
    "serviceName",
    "credentialsType",
    "credentialsInfo",
    "universeDomain",
    "request",
    "response",
    "metadata",
    "retryAttempt",
    "httpResponse",
]  # Additional fields to be Logged.


def logger_configured(logger) -> bool:
    """Determines whether `logger` has non-default configuration

    Args:
        logger: The logger to check.

    Returns:
        bool: Whether the logger has any non-default configuration.
    """
    # A logger counts as "configured" if any of its handler list, level, or
    # propagation flag deviates from the library defaults.
    has_default_handlers = not logger.handlers
    has_default_level = logger.level == logging.NOTSET
    return not (has_default_handlers and has_default_level and logger.propagate)


def initialize_logging():
    """Initializes "google" loggers, partly based on the environment variable

    Initializes the "google" logger and any loggers (at the "google"
    level or lower) specified by the environment variable
    GOOGLE_SDK_PYTHON_LOGGING_SCOPE, as long as none of these loggers
    were previously configured. If any such loggers (including the
    "google" logger) are initialized, they are set to NOT propagate
    log events up to their parent loggers.

    This initialization is executed only once, and hence the
    environment variable is only processed the first time this
    function is called.
    """
    global _LOGGING_INITIALIZED
    if _LOGGING_INITIALIZED:
        return
    setup_logging(os.getenv("GOOGLE_SDK_PYTHON_LOGGING_SCOPE", ""))
    _LOGGING_INITIALIZED = True


def parse_logging_scopes(scopes: Optional[str] = None) -> List[str]:
    """Returns a list of logger names.

    Splits the single string of comma-separated logger names into a list of individual logger name strings.

    Args:
        scopes: The name of a single logger. (In the future, this will be a comma-separated list of multiple loggers.)

    Returns:
        A list of all the logger names in scopes.
    """
    # TODO(https://github.com/googleapis/python-api-core/issues/759): check if the namespace is a valid namespace.
    # TODO(b/380481951): Support logging multiple scopes.
    # TODO(b/380483756): Raise or log a warning for an invalid scope.
    if not scopes:
        return []
    return [scopes]


def configure_defaults(logger):
    """Configures `logger` to emit structured info to stdout."""
    # Respect any configuration the user (or a previous call) already applied.
    if logger_configured(logger):
        return
    handler = logging.StreamHandler()
    handler.setFormatter(StructuredLogFormatter())
    logger.setLevel("DEBUG")
    logger.propagate = False
    logger.addHandler(handler)


def setup_logging(scopes: str = ""):
    """Sets up logging for the specified `scopes`.

    If the loggers specified in `scopes` have not been previously
    configured, this will configure them to emit structured log
    entries to stdout, and to not propagate their log events to their
    parent loggers. Additionally, if the "google" logger (whether it
    was specified in `scopes` or not) was not previously configured,
    it will also configure it to not propagate log events to the root
    logger.

    Args:
        scopes: The name of a single logger. (In the future, this will be a comma-separated list of multiple loggers.)

    """
    # `parse_logging_scopes` only yields valid logger namespaces; currently
    # at most one element.
    for namespace in parse_logging_scopes(scopes):
        # Either creates a module-level logger or fetches the existing one,
        # then applies the default structured-logging configuration.
        configure_defaults(logging.getLogger(namespace))

    # Disable propagation from the base "google" logger to the root logger,
    # but only if user code has not already configured the base logger.
    base_logger = logging.getLogger(_BASE_LOGGER_NAME)
    if not logger_configured(base_logger):
        base_logger.propagate = False
# TODO(https://github.com/googleapis/python-api-core/issues/763): Expand documentation.
class StructuredLogFormatter(logging.Formatter):
    """Formats each log record as a single-line JSON object."""

    # TODO(https://github.com/googleapis/python-api-core/issues/761): ensure that additional fields such as
    # function name, file name, and line no. appear in a log output.
    def format(self, record: logging.LogRecord):
        # Core fields present in every structured payload.
        payload = {
            "timestamp": self.formatTime(record),
            "severity": record.levelname,
            "name": record.name,
            "message": record.getMessage(),
        }

        # Merge in any recognized extra fields attached to this record.
        payload.update(
            {
                field: getattr(record, field)
                for field in _recognized_logging_fields
                if getattr(record, field, None) is not None
            }
        )
        return json.dumps(payload)
"""Client options provide a consistent interface for user options across clients."""

from typing import Callable, Mapping, Optional, Sequence, Tuple


class ClientOptions(object):
    """Client Options used to set options on clients.

    Args:
        api_endpoint (Optional[str]): The desired API endpoint, e.g.,
            compute.googleapis.com
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback
            which returns client certificate bytes and private key bytes both in
            PEM format. ``client_cert_source`` and ``client_encrypted_cert_source``
            are mutually exclusive.
        client_encrypted_cert_source (Optional[Callable[[], Tuple[str, str, bytes]]]):
            A callback which returns client certificate file path, encrypted
            private key file path, and the passphrase bytes. ``client_cert_source``
            and ``client_encrypted_cert_source`` are mutually exclusive.
        quota_project_id (Optional[str]): A project name that a client's
            quota belongs to.
        credentials_file (Optional[str]): A path to a file storing credentials.
            ``credentials_file`` and ``api_key`` are mutually exclusive.

            .. warning::
                Important: If you accept a credential configuration (credential JSON/File/Stream)
                from an external source for authentication to Google Cloud Platform, you must
                validate it before providing it to any Google API or client library. Providing an
                unvalidated credential configuration to Google APIs or libraries can compromise
                the security of your systems and data. For more information, refer to
                `Validate credential configurations from external sources`_.

            .. _Validate credential configurations from external sources:
                https://cloud.google.com/docs/authentication/external/externally-sourced-credentials
        scopes (Optional[Sequence[str]]): OAuth access token override scopes.
        api_key (Optional[str]): Google API key. ``credentials_file`` and
            ``api_key`` are mutually exclusive.
        api_audience (Optional[str]): The intended audience for the API calls
            to the service that will be set when using certain 3rd party
            authentication flows. Audience is typically a resource identifier.
            If not set, the service endpoint value will be used as a default.
            An example of a valid ``api_audience`` is: "https://language.googleapis.com".
        universe_domain (Optional[str]): The desired universe domain. This must match
            the one in credentials. If not set, the default universe domain is
            ``googleapis.com``. If both ``api_endpoint`` and ``universe_domain`` are set,
            then ``api_endpoint`` is used as the service endpoint. If ``api_endpoint`` is
            not specified, the format will be ``{service}.{universe_domain}``.

    Raises:
        ValueError: If both ``client_cert_source`` and ``client_encrypted_cert_source``
            are provided, or both ``credentials_file`` and ``api_key`` are provided.
    """

    def __init__(
        self,
        api_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        client_encrypted_cert_source: Optional[
            Callable[[], Tuple[str, str, bytes]]
        ] = None,
        quota_project_id: Optional[str] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        api_key: Optional[str] = None,
        api_audience: Optional[str] = None,
        universe_domain: Optional[str] = None,
    ):
        # Validate the two mutually-exclusive pairs up front so invalid
        # combinations fail at construction time rather than at first use.
        if client_cert_source and client_encrypted_cert_source:
            raise ValueError(
                "client_cert_source and client_encrypted_cert_source are mutually exclusive"
            )
        if api_key and credentials_file:
            raise ValueError("api_key and credentials_file are mutually exclusive")
        self.api_endpoint = api_endpoint
        self.client_cert_source = client_cert_source
        self.client_encrypted_cert_source = client_encrypted_cert_source
        self.quota_project_id = quota_project_id
        self.credentials_file = credentials_file
        self.scopes = scopes
        self.api_key = api_key
        self.api_audience = api_audience
        self.universe_domain = universe_domain

    def __repr__(self) -> str:
        return "ClientOptions: " + repr(self.__dict__)


def from_dict(options: Mapping[str, object]) -> ClientOptions:
    """Construct a client options object from a mapping object.

    Args:
        options (collections.abc.Mapping): A mapping object with client options.
            See the docstring for ClientOptions for details on valid arguments.

    Returns:
        ClientOptions: The constructed options object.

    Raises:
        ValueError: If the mapping contains a key that is not a valid
            ``ClientOptions`` attribute.
    """
    client_options = ClientOptions()

    for key, value in options.items():
        if not hasattr(client_options, key):
            raise ValueError(f"ClientOptions does not accept an option '{key}'")
        setattr(client_options, key, value)

    return client_options
code-block:: python

    extended_operation = my_api_client.long_running_method()

    def my_callback(ex_op):
        print(f"Operation {ex_op.name} completed")

    extended_operation.add_done_callback(my_callback)

"""

import threading

from google.api_core import exceptions
from google.api_core.future import polling


class ExtendedOperation(polling.PollingFuture):
    """An ExtendedOperation future for interacting with a Google API Long-Running Operation.

    Args:
        extended_operation (proto.Message): The initial operation.
        refresh (Callable[[], type(extended_operation)]): A callable that returns
            the latest state of the operation.
        cancel (Callable[[], None]): A callable that tries to cancel the operation.
        polling Optional(google.api_core.retry.Retry): The configuration used
            for polling. This can be used to control how often :meth:`done`
            is polled. If the ``timeout`` argument to :meth:`result` is
            specified it will override the ``polling.timeout`` property.
        retry Optional(google.api_core.retry.Retry): DEPRECATED use ``polling``
            instead. If specified it will override ``polling`` parameter to
            maintain backward compatibility.

    Note: Most long-running API methods use google.api_core.operation.Operation
    This class is a wrapper for a subset of methods that use alternative
    Long-Running Operation (LRO) semantics.

    Note: there is not a concrete type the extended operation must be.
    It MUST have fields that correspond to the following, POSSIBLY WITH DIFFERENT NAMES:
    * name: str
    * status: Union[str, bool, enum.Enum]
    * error_code: int
    * error_message: str
    """

    def __init__(
        self,
        extended_operation,
        refresh,
        cancel,
        polling=polling.DEFAULT_POLLING,
        **kwargs,
    ):
        super().__init__(polling=polling, **kwargs)
        self._extended_operation = extended_operation
        self._refresh = refresh
        self._cancel = cancel
        # Note: the extended operation does not give a good way to indicate cancellation.
        # We make do with manually tracking cancellation and checking for doneness.
        self._cancelled = False
        # Guards future completion (set_result/set_exception) across
        # concurrent refreshes; see _handle_refreshed_operation.
        self._completion_lock = threading.Lock()
        # Invoke in case the operation came back already complete.
        self._handle_refreshed_operation()

    # Note: the following four properties MUST be overridden in a subclass
    # if, and only if, the fields in the corresponding extended operation message
    # have different names.
    #
    # E.g. we have an extended operation class that looks like
    #
    #     class MyOperation(proto.Message):
    #         moniker = proto.Field(proto.STRING, number=1)
    #         status_msg = proto.Field(proto.STRING, number=2)
    #         optional http_error_code = proto.Field(proto.INT32, number=3)
    #         optional http_error_msg = proto.Field(proto.STRING, number=4)
    #
    # the ExtendedOperation subclass would provide property overrides that map
    # to these (poorly named) fields.
    @property
    def name(self):
        return self._extended_operation.name

    @property
    def status(self):
        return self._extended_operation.status

    @property
    def error_code(self):
        return self._extended_operation.error_code

    @property
    def error_message(self):
        return self._extended_operation.error_message

    def __getattr__(self, name):
        # Fall through to the wrapped operation message for any attribute
        # this wrapper does not define itself.
        return getattr(self._extended_operation, name)

    def done(self, retry=None):
        # Polls the server (via the refresh callable) before reporting doneness.
        self._refresh_and_update(retry)
        return self._extended_operation.done

    def cancel(self):
        # Returns False if the operation already completed (nothing to cancel),
        # True after issuing a best-effort cancel request.
        if self.done():
            return False

        self._cancel()
        self._cancelled = True
        return True

    def cancelled(self):
        # TODO(dovs): there is not currently a good way to determine whether the
        # operation has been cancelled.
        # The best we can do is manually keep track of cancellation
        # and check for doneness.
        if not self._cancelled:
            return False

        self._refresh_and_update()
        return self._extended_operation.done

    def _refresh_and_update(self, retry=None):
        # Only poll the server while the operation is still running; once
        # done, the wrapped message is final.
        if not self._extended_operation.done:
            self._extended_operation = (
                self._refresh(retry=retry) if retry else self._refresh()
            )
            self._handle_refreshed_operation()

    def _handle_refreshed_operation(self):
        # Completes the future (exception or result) exactly once; the lock
        # serializes concurrent refreshes reaching this point.
        with self._completion_lock:
            if not self._extended_operation.done:
                return

            if self.error_code and self.error_message:
                # Note: `errors` can be removed once proposal A from
                # b/284179390 is implemented.
                errors = []
                if hasattr(self, "error") and hasattr(self.error, "errors"):
                    errors = self.error.errors
                exception = exceptions.from_http_status(
                    status_code=self.error_code,
                    message=self.error_message,
                    response=self._extended_operation,
                    errors=errors,
                )
                self.set_exception(exception)
            elif self.error_code or self.error_message:
                # Only one of code/message present is unexpected; surface it
                # as a generic API call error.
                exception = exceptions.GoogleAPICallError(
                    f"Unexpected error {self.error_code}: {self.error_message}"
                )
                self.set_exception(exception)
            else:
                # Extended operations have no payload.
                self.set_result(None)

    @classmethod
    def make(cls, refresh, cancel, extended_operation, **kwargs):
        """
        Return an instantiated ExtendedOperation (or child) that wraps
        * a refresh callable
        * a cancel callable (can be a no-op)
        * an initial result

        .. note::
            It is the caller's responsibility to set up refresh and cancel
            with their correct request argument.
            The reason for this is that the services that use Extended Operations
            have rpcs that look something like the following:

            // service.proto
            service MyLongService {
                rpc StartLongTask(StartLongTaskRequest) returns (ExtendedOperation) {
                    option (google.cloud.operation_service) = "CustomOperationService";
                }
            }

            service CustomOperationService {
                rpc Get(GetOperationRequest) returns (ExtendedOperation) {
                    option (google.cloud.operation_polling_method) = true;
                }
            }

        Any info needed for the poll, e.g. a name, path params, etc.
        is held in the request, which the initial client method is in a much
        better position to make made because the caller made the initial request.

        TL;DR: the caller sets up closures for refresh and cancel that carry
        the properly configured requests.

        Args:
            refresh (Callable[Optional[Retry]][type(extended_operation)]): A callable that
                returns the latest state of the operation.
            cancel (Callable[][Any]): A callable that tries to cancel the operation
                on a best effort basis.
            extended_operation (Any): The initial response of the long running method.
                See the docstring for ExtendedOperation.__init__ for requirements on
                the type and fields of extended_operation
        """
        return cls(extended_operation, refresh, cancel, **kwargs)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This import for backward compatibility only. +from functools import wraps # noqa: F401 pragma: NO COVER diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/page_iterator_async.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/page_iterator_async.py new file mode 100644 index 0000000000000000000000000000000000000000..c0725758ec7299dd1bb8a45bcb0299769cff3e45 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/page_iterator_async.py @@ -0,0 +1,285 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""AsyncIO iterators for paging through paged API methods. + +These iterators simplify the process of paging through API responses +where the request takes a page token and the response is a list of results with +a token for the next page. See `list pagination`_ in the Google API Style Guide +for more details. + +.. 
_list pagination:
    https://cloud.google.com/apis/design/design_patterns#list_pagination

API clients that have methods that follow the list pagination pattern can
return an :class:`.AsyncIterator`:

    >>> results_iterator = await client.list_resources()

Or you can walk your way through items and call off the search early if
you find what you're looking for (resulting in possibly fewer requests)::

    >>> async for resource in results_iterator:
    ...     print(resource.name)
    ...     if not resource.is_valid:
    ...         break

At any point, you may check the number of items consumed by referencing the
``num_results`` property of the iterator::

    >>> async for my_item in results_iterator:
    ...     if results_iterator.num_results >= 10:
    ...         break

When iterating, not every new item will send a request to the server.
To iterate based on each page of items (where a page corresponds to
a request)::

    >>> async for page in results_iterator.pages:
    ...     print('=' * 20)
    ...     print('   Page number: {:d}'.format(iterator.page_number))
    ...     print('  Items in page: {:d}'.format(page.num_items))
    ...     print('     First item: {!r}'.format(next(page)))
    ...     print('Items remaining: {:d}'.format(page.remaining))
    ...     print('Next page token: {}'.format(iterator.next_page_token))
    ====================
        Page number: 1
      Items in page: 1
         First item:
    Items remaining: 0
    Next page token: eav1OzQB0OM8rLdGXOEsyQWSG
    ====================
        Page number: 2
      Items in page: 19
         First item:
    Items remaining: 18
    Next page token: None
"""

import abc

from google.api_core.page_iterator import Page


def _item_to_value_identity(iterator, item):
    """An item to value transformer that returns the item un-changed."""
    # pylint: disable=unused-argument
    # We are conforming to the interface defined by Iterator.
    return item


class AsyncIterator(abc.ABC):
    """A generic class for iterating through API list responses.

    Args:
        client(google.cloud.client.Client): The API client.
        item_to_value (Callable[google.api_core.page_iterator_async.AsyncIterator, Any]):
            Callable to convert an item from the type in the raw API response
            into the native object. Will be called with the iterator and a
            single item.
        page_token (str): A token identifying a page in a result set to start
            fetching results from.
        max_results (int): The maximum number of results to fetch.
    """

    def __init__(
        self,
        client,
        item_to_value=_item_to_value_identity,
        page_token=None,
        max_results=None,
    ):
        # An iterator may be consumed exactly once, either via `pages` or
        # via item iteration; `_started` enforces that.
        self._started = False
        # Lazily-created item async-iterator backing __anext__.
        self.__active_aiterator = None

        self.client = client
        """Optional[Any]: The client that created this iterator."""
        self.item_to_value = item_to_value
        """Callable[Iterator, Any]: Callable to convert an item from the type
        in the raw API response into the native object. Will be called with
        the iterator and a
        single item.
        """
        self.max_results = max_results
        """int: The maximum number of results to fetch."""

        # The attributes below will change over the life of the iterator.
        self.page_number = 0
        """int: The current page of results."""
        self.next_page_token = page_token
        """str: The token for the next page of results. If this is set before
        the iterator starts, it effectively offsets the iterator to a
        specific starting point."""
        self.num_results = 0
        """int: The total number of results fetched so far."""

    @property
    def pages(self):
        """Iterator of pages in the response.

        returns:
            types.GeneratorType[google.api_core.page_iterator.Page]: A
                generator of page instances.

        raises:
            ValueError: If the iterator has already been started.
        """
        if self._started:
            raise ValueError("Iterator has already started", self)
        self._started = True
        return self._page_aiter(increment=True)

    async def _items_aiter(self):
        """Iterator for each item returned."""
        # increment=False: item counting is done here, per item, rather than
        # per page in _page_aiter.
        async for page in self._page_aiter(increment=False):
            for item in page:
                self.num_results += 1
                yield item

    def __aiter__(self):
        """Iterator for each item returned.

        Returns:
            types.GeneratorType[Any]: A generator of items from the API.

        Raises:
            ValueError: If the iterator has already been started.
        """
        if self._started:
            raise ValueError("Iterator has already started", self)
        self._started = True
        return self._items_aiter()

    async def __anext__(self):
        # Allow `await anext(iterator)` style use by caching a single
        # underlying item iterator across calls.
        if self.__active_aiterator is None:
            self.__active_aiterator = self.__aiter__()
        return await self.__active_aiterator.__anext__()

    async def _page_aiter(self, increment):
        """Generator of pages of API responses.

        Args:
            increment (bool): Flag indicating if the total number of results
                should be incremented on each page. This is useful since a page
                iterator will want to increment by results per page while an
                items iterator will want to increment per item.

        Yields:
            Page: each page of items from the API.
        """
        page = await self._next_page()
        while page is not None:
            self.page_number += 1
            if increment:
                self.num_results += page.num_items
            yield page
            page = await self._next_page()

    @abc.abstractmethod
    async def _next_page(self):
        """Get the next page in the iterator.

        This does nothing and is intended to be over-ridden by subclasses
        to return the next :class:`Page`.

        Raises:
            NotImplementedError: Always, this method is abstract.
        """
        raise NotImplementedError


class AsyncGRPCIterator(AsyncIterator):
    """A generic class for iterating through gRPC list responses.

    .. note:: The class does not take a ``page_token`` argument because it can
        just be specified in the ``request``.

    Args:
        client (google.cloud.client.Client): The API client. This unused by
            this class, but kept to satisfy the :class:`Iterator` interface.
        method (Callable[protobuf.Message]): A bound gRPC method that should
            take a single message for the request.
        request (protobuf.Message): The request message.
        items_field (str): The field in the response message that has the
            items for the page.
        item_to_value (Callable[GRPCIterator, Any]): Callable to convert an
            item from the type in the JSON response into a native object. Will
            be called with the iterator and a single item.
        request_token_field (str): The field in the request message used to
            specify the page token.
        response_token_field (str): The field in the response message that has
            the token for the next page.
        max_results (int): The maximum number of results to fetch.

    .. autoattribute:: pages
    """

    _DEFAULT_REQUEST_TOKEN_FIELD = "page_token"
    _DEFAULT_RESPONSE_TOKEN_FIELD = "next_page_token"

    def __init__(
        self,
        client,
        method,
        request,
        items_field,
        item_to_value=_item_to_value_identity,
        request_token_field=_DEFAULT_REQUEST_TOKEN_FIELD,
        response_token_field=_DEFAULT_RESPONSE_TOKEN_FIELD,
        max_results=None,
    ):
        super().__init__(client, item_to_value, max_results=max_results)
        self._method = method
        self._request = request
        self._items_field = items_field
        self._request_token_field = request_token_field
        self._response_token_field = response_token_field

    async def _next_page(self):
        """Get the next page in the iterator.

        Returns:
            Page: The next page in the iterator or :data:`None` if
                there are no pages left.
        """
        if not self._has_next_page():
            return None

        # Carry the continuation token from the previous response into the
        # next request before issuing the RPC.
        if self.next_page_token is not None:
            setattr(self._request, self._request_token_field, self.next_page_token)

        response = await self._method(self._request)

        self.next_page_token = getattr(response, self._response_token_field)
        items = getattr(response, self._items_field)
        page = Page(self, items, self.item_to_value, raw_page=response)

        return page

    def _has_next_page(self):
        """Determines whether or not there are more pages with results.

        Returns:
            bool: Whether the iterator has more pages.
        """
        # The very first request is always issued, regardless of tokens.
        if self.page_number == 0:
            return True

        # Note: intentionally a falsy check instead of a None check. The RPC
        # can return an empty string indicating no more pages.
        if self.max_results is not None:
            if self.num_results >= self.max_results:
                return False

        return True if self.next_page_token else False
"""Helpers for asynchronous server-side streaming in REST."""

from typing import Union

import proto

try:
    import google.auth.aio.transport
except ImportError as e:  # pragma: NO COVER
    raise ImportError(
        "`google-api-core[async_rest]` is required to use asynchronous rest streaming. "
        "Install the `async_rest` extra of `google-api-core` using "
        "`pip install google-api-core[async_rest]`."
    ) from e

import google.protobuf.message
from google.api_core._rest_streaming_base import BaseResponseIterator


class AsyncResponseIterator(BaseResponseIterator):
    """Asynchronous Iterator over REST API responses.

    Args:
        response (google.auth.aio.transport.Response): An API response object.
        response_message_cls (Union[proto.Message, google.protobuf.message.Message]): A response
            class expected to be returned from an API.

    Raises:
        ValueError:
            - If `response_message_cls` is not a subclass of `proto.Message` or `google.protobuf.message.Message`.
    """

    def __init__(
        self,
        response: google.auth.aio.transport.Response,
        response_message_cls: Union[proto.Message, google.protobuf.message.Message],
    ):
        self._response = response
        # Bytes read from the wire per iteration while assembling messages.
        self._chunk_size = 1024
        # TODO(https://github.com/googleapis/python-api-core/issues/703): mypy does not recognize the abstract content
        # method as an async generator as it looks for the `yield` keyword in the implementation.
        # Given that the abstract method is not implemented, mypy fails to recognize it as an async generator.
        # mypy warnings are silenced until the linked issue is resolved.
        self._response_itr = self._response.content(self._chunk_size).__aiter__()  # type: ignore
        super(AsyncResponseIterator, self).__init__(
            response_message_cls=response_message_cls
        )

    async def __aenter__(self):
        return self

    async def cancel(self):
        """Cancel existing streaming operation."""
        await self._response.close()

    async def __anext__(self):
        """Return the next parsed message, reading chunks until one is ready.

        Raises:
            ValueError: If the stream ends while a JSON object is still open
                (i.e. the server sent a truncated/unfinished stream).
            StopAsyncIteration: When the stream is exhausted cleanly.
        """
        while not self._ready_objs:
            try:
                chunk = await self._response_itr.__anext__()
                self._process_chunk(chunk.decode("utf-8"))
            except StopAsyncIteration:
                # _level > 0 means we are still inside an unterminated JSON
                # structure: the stream ended prematurely.
                # Fixed: the message previously read "i Unfinished stream: %s".
                if self._level > 0:
                    raise ValueError("Unfinished stream: %s" % self._obj)
                # Re-raise with the original traceback (stream exhausted).
                raise
        return self._grab()

    def __aiter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        """Cancel existing async streaming operation."""
        await self._response.close()
+ +"""Retry implementation for Google API client libraries.""" + +from .retry_base import exponential_sleep_generator +from .retry_base import if_exception_type +from .retry_base import if_transient_error +from .retry_base import build_retry_error +from .retry_base import RetryFailureReason +from .retry_unary import Retry +from .retry_unary import retry_target +from .retry_unary_async import AsyncRetry +from .retry_unary_async import retry_target as retry_target_async +from .retry_streaming import StreamingRetry +from .retry_streaming import retry_target_stream +from .retry_streaming_async import AsyncStreamingRetry +from .retry_streaming_async import retry_target_stream as retry_target_stream_async + +# The following imports are for backwards compatibility with https://github.com/googleapis/python-api-core/blob/4d7d2edee2c108d43deb151e6e0fdceb56b73275/google/api_core/retry.py +# +# TODO: Revert these imports on the next major version release (https://github.com/googleapis/python-api-core/issues/576) +from google.api_core import datetime_helpers # noqa: F401 +from google.api_core import exceptions # noqa: F401 +from google.auth import exceptions as auth_exceptions # noqa: F401 + +__all__ = ( + "exponential_sleep_generator", + "if_exception_type", + "if_transient_error", + "build_retry_error", + "RetryFailureReason", + "Retry", + "AsyncRetry", + "StreamingRetry", + "AsyncStreamingRetry", + "retry_target", + "retry_target_async", + "retry_target_stream", + "retry_target_stream_async", +) diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_base.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfea605dc2d763bccabd2ff98e444c0a15b27fed Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_base.cpython-310.pyc differ diff --git 
a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_streaming.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_streaming.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dac83ee81977b73618b0a923642863f571c573e Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_streaming.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_streaming_async.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_streaming_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d90852455733f7c624bb627b685c4607f6e2a7b3 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_streaming_async.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_unary.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_unary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f49033ef4032c55d9e662f490f40b46a772a7d84 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_unary.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_unary_async.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_unary_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e43f371b8108a8a0ee4f1cf0069845d8df0006d Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/__pycache__/retry_unary_async.cpython-310.pyc differ diff --git 
a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_base.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1606e0fe22f13a274d18d3f0adc909afb0e40e96 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_base.py @@ -0,0 +1,361 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared classes and functions for retrying requests. + +:class:`_BaseRetry` is the base class for :class:`Retry`, +:class:`AsyncRetry`, :class:`StreamingRetry`, and :class:`AsyncStreamingRetry`. +""" + +from __future__ import annotations + +import logging +import random +import time + +from enum import Enum +from typing import Any, Callable, Optional, TYPE_CHECKING + +import requests.exceptions + +from google.api_core import exceptions +from google.auth import exceptions as auth_exceptions + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 11): + from typing import Self + else: + from typing_extensions import Self + +_DEFAULT_INITIAL_DELAY = 1.0 # seconds +_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds +_DEFAULT_DELAY_MULTIPLIER = 2.0 +_DEFAULT_DEADLINE = 60.0 * 2.0 # seconds + +_LOGGER = logging.getLogger("google.api_core.retry") + + +def if_exception_type( + *exception_types: type[Exception], +) -> Callable[[Exception], bool]: + """Creates a predicate to check if the exception is of a given type. 
+ + Args: + exception_types (Sequence[:func:`type`]): The exception types to check + for. + + Returns: + Callable[Exception]: A predicate that returns True if the provided + exception is of the given type(s). + """ + + def if_exception_type_predicate(exception: Exception) -> bool: + """Bound predicate for checking an exception type.""" + return isinstance(exception, exception_types) + + return if_exception_type_predicate + + +# pylint: disable=invalid-name +# Pylint sees this as a constant, but it is also an alias that should be +# considered a function. +if_transient_error = if_exception_type( + exceptions.InternalServerError, + exceptions.TooManyRequests, + exceptions.ServiceUnavailable, + requests.exceptions.ConnectionError, + requests.exceptions.ChunkedEncodingError, + auth_exceptions.TransportError, +) +"""A predicate that checks if an exception is a transient API error. + +The following server errors are considered transient: + +- :class:`google.api_core.exceptions.InternalServerError` - HTTP 500, gRPC + ``INTERNAL(13)`` and its subclasses. +- :class:`google.api_core.exceptions.TooManyRequests` - HTTP 429 +- :class:`google.api_core.exceptions.ServiceUnavailable` - HTTP 503 +- :class:`requests.exceptions.ConnectionError` +- :class:`requests.exceptions.ChunkedEncodingError` - The server declared + chunked encoding but sent an invalid chunk. +- :class:`google.auth.exceptions.TransportError` - Used to indicate an + error occurred during an HTTP request. +""" +# pylint: enable=invalid-name + + +def exponential_sleep_generator( + initial: float, maximum: float, multiplier: float = _DEFAULT_DELAY_MULTIPLIER +): + """Generates sleep intervals based on the exponential back-off algorithm. + + This implements the `Truncated Exponential Back-off`_ algorithm. + + .. _Truncated Exponential Back-off: + https://cloud.google.com/storage/docs/exponential-backoff + + Args: + initial (float): The minimum amount of time to delay. This must + be greater than 0. 
+ maximum (float): The maximum amount of time to delay. + multiplier (float): The multiplier applied to the delay. + + Yields: + float: successive sleep intervals. + """ + max_delay = min(initial, maximum) + while True: + yield random.uniform(0.0, max_delay) + max_delay = min(max_delay * multiplier, maximum) + + +class RetryFailureReason(Enum): + """ + The cause of a failed retry, used when building exceptions + """ + + TIMEOUT = 0 + NON_RETRYABLE_ERROR = 1 + + +def build_retry_error( + exc_list: list[Exception], + reason: RetryFailureReason, + timeout_val: float | None, + **kwargs: Any, +) -> tuple[Exception, Exception | None]: + """ + Default exception_factory implementation. + + Returns a RetryError if the failure is due to a timeout, otherwise + returns the last exception encountered. + + Args: + - exc_list: list of exceptions that occurred during the retry + - reason: reason for the retry failure. + Can be TIMEOUT or NON_RETRYABLE_ERROR + - timeout_val: the original timeout value for the retry (in seconds), for use in the exception message + + Returns: + - tuple: a tuple of the exception to be raised, and the cause exception if any + """ + if reason == RetryFailureReason.TIMEOUT: + # return RetryError with the most recent exception as the cause + src_exc = exc_list[-1] if exc_list else None + timeout_val_str = f"of {timeout_val:0.1f}s " if timeout_val is not None else "" + return ( + exceptions.RetryError( + f"Timeout {timeout_val_str}exceeded", + src_exc, + ), + src_exc, + ) + elif exc_list: + # return most recent exception encountered + return exc_list[-1], None + else: + # no exceptions were given in exc_list. 
Raise generic RetryError + return exceptions.RetryError("Unknown error", None), None + + +def _retry_error_helper( + exc: Exception, + deadline: float | None, + next_sleep: float, + error_list: list[Exception], + predicate_fn: Callable[[Exception], bool], + on_error_fn: Callable[[Exception], None] | None, + exc_factory_fn: Callable[ + [list[Exception], RetryFailureReason, float | None], + tuple[Exception, Exception | None], + ], + original_timeout: float | None, +): + """ + Shared logic for handling an error for all retry implementations + + - Raises an error on timeout or non-retryable error + - Calls on_error_fn if provided + - Logs the error + + Args: + - exc: the exception that was raised + - deadline: the deadline for the retry, calculated as a diff from time.monotonic() + - next_sleep: the next sleep interval + - error_list: the list of exceptions that have been raised so far + - predicate_fn: takes `exc` and returns true if the operation should be retried + - on_error_fn: callback to execute when a retryable error occurs + - exc_factory_fn: callback used to build the exception to be raised on terminal failure + - original_timeout_val: the original timeout value for the retry (in seconds), + to be passed to the exception factory for building an error message + """ + error_list.append(exc) + if not predicate_fn(exc): + final_exc, source_exc = exc_factory_fn( + error_list, + RetryFailureReason.NON_RETRYABLE_ERROR, + original_timeout, + ) + raise final_exc from source_exc + if on_error_fn is not None: + on_error_fn(exc) + if deadline is not None and time.monotonic() + next_sleep > deadline: + final_exc, source_exc = exc_factory_fn( + error_list, + RetryFailureReason.TIMEOUT, + original_timeout, + ) + raise final_exc from source_exc + _LOGGER.debug( + "Retrying due to {}, sleeping {:.1f}s ...".format(error_list[-1], next_sleep) + ) + + +class _BaseRetry(object): + """ + Base class for retry configuration objects. 
This class is intended to capture retry + and backoff configuration that is common to both synchronous and asynchronous retries, + for both unary and streaming RPCs. It is not intended to be instantiated directly, + but rather to be subclassed by the various retry configuration classes. + """ + + def __init__( + self, + predicate: Callable[[Exception], bool] = if_transient_error, + initial: float = _DEFAULT_INITIAL_DELAY, + maximum: float = _DEFAULT_MAXIMUM_DELAY, + multiplier: float = _DEFAULT_DELAY_MULTIPLIER, + timeout: Optional[float] = _DEFAULT_DEADLINE, + on_error: Optional[Callable[[Exception], Any]] = None, + **kwargs: Any, + ) -> None: + self._predicate = predicate + self._initial = initial + self._multiplier = multiplier + self._maximum = maximum + self._timeout = kwargs.get("deadline", timeout) + self._deadline = self._timeout + self._on_error = on_error + + def __call__(self, *args, **kwargs) -> Any: + raise NotImplementedError("Not implemented in base class") + + @property + def deadline(self) -> float | None: + """ + DEPRECATED: use ``timeout`` instead. Refer to the ``Retry`` class + documentation for details. + """ + return self._timeout + + @property + def timeout(self) -> float | None: + return self._timeout + + def with_deadline(self, deadline: float | None) -> Self: + """Return a copy of this retry with the given timeout. + + DEPRECATED: use :meth:`with_timeout` instead. Refer to the ``Retry`` class + documentation for details. + + Args: + deadline (float|None): How long to keep retrying, in seconds. If None, + no timeout is enforced. + + Returns: + Retry: A new retry instance with the given timeout. + """ + return self.with_timeout(deadline) + + def with_timeout(self, timeout: float | None) -> Self: + """Return a copy of this retry with the given timeout. + + Args: + timeout (float): How long to keep retrying, in seconds. If None, + no timeout will be enforced. + + Returns: + Retry: A new retry instance with the given timeout. 
+ """ + return type(self)( + predicate=self._predicate, + initial=self._initial, + maximum=self._maximum, + multiplier=self._multiplier, + timeout=timeout, + on_error=self._on_error, + ) + + def with_predicate(self, predicate: Callable[[Exception], bool]) -> Self: + """Return a copy of this retry with the given predicate. + + Args: + predicate (Callable[Exception]): A callable that should return + ``True`` if the given exception is retryable. + + Returns: + Retry: A new retry instance with the given predicate. + """ + return type(self)( + predicate=predicate, + initial=self._initial, + maximum=self._maximum, + multiplier=self._multiplier, + timeout=self._timeout, + on_error=self._on_error, + ) + + def with_delay( + self, + initial: Optional[float] = None, + maximum: Optional[float] = None, + multiplier: Optional[float] = None, + ) -> Self: + """Return a copy of this retry with the given delay options. + + Args: + initial (float): The minimum amount of time to delay (in seconds). This must + be greater than 0. If None, the current value is used. + maximum (float): The maximum amount of time to delay (in seconds). If None, the + current value is used. + multiplier (float): The multiplier applied to the delay. If None, the current + value is used. + + Returns: + Retry: A new retry instance with the given delay options. 
+ """ + return type(self)( + predicate=self._predicate, + initial=initial if initial is not None else self._initial, + maximum=maximum if maximum is not None else self._maximum, + multiplier=multiplier if multiplier is not None else self._multiplier, + timeout=self._timeout, + on_error=self._on_error, + ) + + def __str__(self) -> str: + return ( + "<{} predicate={}, initial={:.1f}, maximum={:.1f}, " + "multiplier={:.1f}, timeout={}, on_error={}>".format( + type(self).__name__, + self._predicate, + self._initial, + self._maximum, + self._multiplier, + self._timeout, # timeout can be None, thus no {:.1f} + self._on_error, + ) + ) diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_streaming.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_streaming.py new file mode 100644 index 0000000000000000000000000000000000000000..00666841cc95bb73160840f4faa29e63e5376653 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_streaming.py @@ -0,0 +1,263 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Generator wrapper for retryable streaming RPCs. 
+""" +from __future__ import annotations + +from typing import ( + Callable, + Optional, + List, + Tuple, + Iterable, + Generator, + TypeVar, + Any, + TYPE_CHECKING, +) + +import sys +import time +import functools + +from google.api_core.retry.retry_base import _BaseRetry +from google.api_core.retry.retry_base import _retry_error_helper +from google.api_core.retry import exponential_sleep_generator +from google.api_core.retry import build_retry_error +from google.api_core.retry import RetryFailureReason + +if TYPE_CHECKING: + if sys.version_info >= (3, 10): + from typing import ParamSpec + else: + from typing_extensions import ParamSpec + + _P = ParamSpec("_P") # target function call parameters + _Y = TypeVar("_Y") # yielded values + + +def retry_target_stream( + target: Callable[_P, Iterable[_Y]], + predicate: Callable[[Exception], bool], + sleep_generator: Iterable[float], + timeout: Optional[float] = None, + on_error: Optional[Callable[[Exception], None]] = None, + exception_factory: Callable[ + [List[Exception], RetryFailureReason, Optional[float]], + Tuple[Exception, Optional[Exception]], + ] = build_retry_error, + init_args: tuple = (), + init_kwargs: dict = {}, + **kwargs, +) -> Generator[_Y, Any, None]: + """Create a generator wrapper that retries the wrapped stream if it fails. + + This is the lowest-level retry helper. Generally, you'll use the + higher-level retry helper :class:`Retry`. + + Args: + target: The generator function to call and retry. + predicate: A callable used to determine if an + exception raised by the target should be considered retryable. + It should return True to retry or False otherwise. + sleep_generator: An infinite iterator that determines + how long to sleep between retries. + timeout: How long to keep retrying the target. + Note: timeout is only checked before initiating a retry, so the target may + run past the timeout value as long as it is healthy. 
+ on_error: If given, the on_error callback will be called with each + retryable exception raised by the target. Any error raised by this + function will *not* be caught. + exception_factory: A function that is called when the retryable reaches + a terminal failure state, used to construct an exception to be raised. + It takes a list of all exceptions encountered, a retry.RetryFailureReason + enum indicating the failure cause, and the original timeout value + as arguments. It should return a tuple of the exception to be raised, + along with the cause exception if any. The default implementation will raise + a RetryError on timeout, or the last exception encountered otherwise. + init_args: Positional arguments to pass to the target function. + init_kwargs: Keyword arguments to pass to the target function. + + Returns: + Generator: A retryable generator that wraps the target generator function. + + Raises: + ValueError: If the sleep generator stops yielding values. + Exception: a custom exception specified by the exception_factory if provided. + If no exception_factory is provided: + google.api_core.RetryError: If the timeout is exceeded while retrying. + Exception: If the target raises an error that isn't retryable. + """ + + timeout = kwargs.get("deadline", timeout) + deadline: Optional[float] = ( + time.monotonic() + timeout if timeout is not None else None + ) + error_list: list[Exception] = [] + + for sleep in sleep_generator: + # Start a new retry loop + try: + # Note: in the future, we can add a ResumptionStrategy object + # to generate new args between calls. For now, use the same args + # for each attempt. + subgenerator = target(*init_args, **init_kwargs) + return (yield from subgenerator) + # handle exceptions raised by the subgenerator + # pylint: disable=broad-except + # This function explicitly must deal with broad exceptions. 
+ except Exception as exc: + # defer to shared logic for handling errors + _retry_error_helper( + exc, + deadline, + sleep, + error_list, + predicate, + on_error, + exception_factory, + timeout, + ) + # if exception not raised, sleep before next attempt + time.sleep(sleep) + + raise ValueError("Sleep generator stopped yielding sleep values.") + + +class StreamingRetry(_BaseRetry): + """Exponential retry decorator for streaming synchronous RPCs. + + This class returns a Generator when called, which wraps the target + stream in retry logic. If any exception is raised by the target, the + entire stream will be retried within the wrapper. + + Although the default behavior is to retry transient API errors, a + different predicate can be provided to retry other exceptions. + + Important Note: when a stream encounters a retryable error, it will + silently construct a fresh iterator instance in the background + and continue yielding (likely duplicate) values as if no error occurred. + This is the most general way to retry a stream, but it often is not the + desired behavior. Example: iter([1, 2, 1/0]) -> [1, 2, 1, 2, ...] + + There are two ways to build more advanced retry logic for streams: + + 1. Wrap the target + Use a ``target`` that maintains state between retries, and creates a + different generator on each retry call. For example, you can wrap a + network call in a function that modifies the request based on what has + already been returned: + + .. code-block:: python + + def attempt_with_modified_request(target, request, seen_items=[]): + # remove seen items from request on each attempt + new_request = modify_request(request, seen_items) + new_generator = target(new_request) + for item in new_generator: + yield item + seen_items.append(item) + + retry_wrapped_fn = StreamingRetry()(attempt_with_modified_request) + retryable_generator = retry_wrapped_fn(target, request) + + 2. 
Wrap the retry generator + Alternatively, you can wrap the retryable generator itself before + passing it to the end-user to add a filter on the stream. For + example, you can keep track of the items that were successfully yielded + in previous retry attempts, and only yield new items when the + new attempt surpasses the previous ones: + + .. code-block:: python + + def retryable_with_filter(target): + stream_idx = 0 + # reset stream_idx when the stream is retried + def on_error(e): + nonlocal stream_idx + stream_idx = 0 + # build retryable + retryable_gen = StreamingRetry(...)(target) + # keep track of what has been yielded out of filter + seen_items = [] + for item in retryable_gen(): + if stream_idx >= len(seen_items): + seen_items.append(item) + yield item + elif item != seen_items[stream_idx]: + raise ValueError("Stream differs from last attempt") + stream_idx += 1 + + filter_retry_wrapped = retryable_with_filter(target) + + Args: + predicate (Callable[Exception]): A callable that should return ``True`` + if the given exception is retryable. + initial (float): The minimum amount of time to delay in seconds. This + must be greater than 0. + maximum (float): The maximum amount of time to delay in seconds. + multiplier (float): The multiplier applied to the delay. + timeout (float): How long to keep retrying, in seconds. + Note: timeout is only checked before initiating a retry, so the target may + run past the timeout value as long as it is healthy. + on_error (Callable[Exception]): A function to call while processing + a retryable exception. Any error raised by this function will + *not* be caught. + deadline (float): DEPRECATED: use `timeout` instead. For backward + compatibility, if specified it will override the ``timeout`` parameter. + """ + + def __call__( + self, + func: Callable[_P, Iterable[_Y]], + on_error: Callable[[Exception], Any] | None = None, + ) -> Callable[_P, Generator[_Y, Any, None]]: + """Wrap a callable with retry behavior. 
+ + Args: + func (Callable): The callable to add retry behavior to. + on_error (Optional[Callable[Exception]]): If given, the + on_error callback will be called with each retryable exception + raised by the wrapped function. Any error raised by this + function will *not* be caught. If on_error was specified in the + constructor, this value will be ignored. + + Returns: + Callable: A callable that will invoke ``func`` with retry + behavior. + """ + if self._on_error is not None: + on_error = self._on_error + + @functools.wraps(func) + def retry_wrapped_func( + *args: _P.args, **kwargs: _P.kwargs + ) -> Generator[_Y, Any, None]: + """A wrapper that calls target function with retry.""" + sleep_generator = exponential_sleep_generator( + self._initial, self._maximum, multiplier=self._multiplier + ) + return retry_target_stream( + func, + predicate=self._predicate, + sleep_generator=sleep_generator, + timeout=self._timeout, + on_error=on_error, + init_args=args, + init_kwargs=kwargs, + ) + + return retry_wrapped_func diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_streaming_async.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_streaming_async.py new file mode 100644 index 0000000000000000000000000000000000000000..942abf5fe1b5b96efa3234bc0ced222463882cf1 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_streaming_async.py @@ -0,0 +1,325 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Generator wrapper for retryable async streaming RPCs. +""" +from __future__ import annotations + +from typing import ( + cast, + Any, + Callable, + Iterable, + AsyncIterator, + AsyncIterable, + Awaitable, + TypeVar, + AsyncGenerator, + TYPE_CHECKING, +) + +import asyncio +import time +import sys +import functools + +from google.api_core.retry.retry_base import _BaseRetry +from google.api_core.retry.retry_base import _retry_error_helper +from google.api_core.retry import exponential_sleep_generator +from google.api_core.retry import build_retry_error +from google.api_core.retry import RetryFailureReason + + +if TYPE_CHECKING: + if sys.version_info >= (3, 10): + from typing import ParamSpec + else: + from typing_extensions import ParamSpec + + _P = ParamSpec("_P") # target function call parameters + _Y = TypeVar("_Y") # yielded values + + +async def retry_target_stream( + target: Callable[_P, AsyncIterable[_Y] | Awaitable[AsyncIterable[_Y]]], + predicate: Callable[[Exception], bool], + sleep_generator: Iterable[float], + timeout: float | None = None, + on_error: Callable[[Exception], None] | None = None, + exception_factory: Callable[ + [list[Exception], RetryFailureReason, float | None], + tuple[Exception, Exception | None], + ] = build_retry_error, + init_args: tuple = (), + init_kwargs: dict = {}, + **kwargs, +) -> AsyncGenerator[_Y, None]: + """Create a generator wrapper that retries the wrapped stream if it fails. + + This is the lowest-level retry helper. Generally, you'll use the + higher-level retry helper :class:`AsyncRetry`. + + Args: + target: The generator function to call and retry. + predicate: A callable used to determine if an + exception raised by the target should be considered retryable. + It should return True to retry or False otherwise. + sleep_generator: An infinite iterator that determines + how long to sleep between retries. 
+ timeout: How long to keep retrying the target. + Note: timeout is only checked before initiating a retry, so the target may + run past the timeout value as long as it is healthy. + on_error: If given, the on_error callback will be called with each + retryable exception raised by the target. Any error raised by this + function will *not* be caught. + exception_factory: A function that is called when the retryable reaches + a terminal failure state, used to construct an exception to be raised. + It takes a list of all exceptions encountered, a retry.RetryFailureReason + enum indicating the failure cause, and the original timeout value + as arguments. It should return a tuple of the exception to be raised, + along with the cause exception if any. The default implementation will raise + a RetryError on timeout, or the last exception encountered otherwise. + init_args: Positional arguments to pass to the target function. + init_kwargs: Keyword arguments to pass to the target function. + + Returns: + AsyncGenerator: A retryable generator that wraps the target generator function. + + Raises: + ValueError: If the sleep generator stops yielding values. + Exception: a custom exception specified by the exception_factory if provided. + If no exception_factory is provided: + google.api_core.RetryError: If the timeout is exceeded while retrying. + Exception: If the target raises an error that isn't retryable. + """ + target_iterator: AsyncIterator[_Y] | None = None + timeout = kwargs.get("deadline", timeout) + deadline = time.monotonic() + timeout if timeout else None + # keep track of retryable exceptions we encounter to pass in to exception_factory + error_list: list[Exception] = [] + target_is_generator: bool | None = None + + for sleep in sleep_generator: + # Start a new retry loop + try: + # Note: in the future, we can add a ResumptionStrategy object + # to generate new args between calls. For now, use the same args + # for each attempt. 
+ target_output: AsyncIterable[_Y] | Awaitable[AsyncIterable[_Y]] = target( + *init_args, **init_kwargs + ) + try: + # gapic functions return the generator behind an awaitable + # unwrap the awaitable so we can work with the generator directly + target_output = await target_output # type: ignore + except TypeError: + # was not awaitable, continue + pass + target_iterator = cast(AsyncIterable["_Y"], target_output).__aiter__() + + if target_is_generator is None: + # Check if target supports generator features (asend, athrow, aclose) + target_is_generator = bool(getattr(target_iterator, "asend", None)) + + sent_in = None + while True: + ## Read from target_iterator + # If the target is a generator, we will advance it with `asend` + # otherwise, we will use `anext` + if target_is_generator: + next_value = await target_iterator.asend(sent_in) # type: ignore + else: + next_value = await target_iterator.__anext__() + ## Yield from Wrapper to caller + try: + # yield latest value from target + # exceptions from `athrow` and `aclose` are injected here + sent_in = yield next_value + except GeneratorExit: + # if wrapper received `aclose` while waiting on yield, + # it will raise GeneratorExit here + if target_is_generator: + # pass to inner target_iterator for handling + await cast(AsyncGenerator["_Y", None], target_iterator).aclose() + else: + raise + return + except: # noqa: E722 + # bare except catches any exception passed to `athrow` + if target_is_generator: + # delegate error handling to target_iterator + await cast(AsyncGenerator["_Y", None], target_iterator).athrow( + cast(BaseException, sys.exc_info()[1]) + ) + else: + raise + return + except StopAsyncIteration: + # if iterator exhausted, return + return + # handle exceptions raised by the target_iterator + # pylint: disable=broad-except + # This function explicitly must deal with broad exceptions. 
+ except Exception as exc: + # defer to shared logic for handling errors + _retry_error_helper( + exc, + deadline, + sleep, + error_list, + predicate, + on_error, + exception_factory, + timeout, + ) + # if exception not raised, sleep before next attempt + await asyncio.sleep(sleep) + finally: + if target_is_generator and target_iterator is not None: + await cast(AsyncGenerator["_Y", None], target_iterator).aclose() + raise ValueError("Sleep generator stopped yielding sleep values.") + + +class AsyncStreamingRetry(_BaseRetry): + """Exponential retry decorator for async streaming rpcs. + + This class returns an AsyncGenerator when called, which wraps the target + stream in retry logic. If any exception is raised by the target, the + entire stream will be retried within the wrapper. + + Although the default behavior is to retry transient API errors, a + different predicate can be provided to retry other exceptions. + + Important Note: when a stream is encounters a retryable error, it will + silently construct a fresh iterator instance in the background + and continue yielding (likely duplicate) values as if no error occurred. + This is the most general way to retry a stream, but it often is not the + desired behavior. Example: iter([1, 2, 1/0]) -> [1, 2, 1, 2, ...] + + There are two ways to build more advanced retry logic for streams: + + 1. Wrap the target + Use a ``target`` that maintains state between retries, and creates a + different generator on each retry call. For example, you can wrap a + grpc call in a function that modifies the request based on what has + already been returned: + + .. 
code-block:: python + + async def attempt_with_modified_request(target, request, seen_items=[]): + # remove seen items from request on each attempt + new_request = modify_request(request, seen_items) + new_generator = await target(new_request) + async for item in new_generator: + yield item + seen_items.append(item) + + retry_wrapped = AsyncRetry(is_stream=True,...)(attempt_with_modified_request, target, request, []) + + 2. Wrap the retry generator + Alternatively, you can wrap the retryable generator itself before + passing it to the end-user to add a filter on the stream. For + example, you can keep track of the items that were successfully yielded + in previous retry attempts, and only yield new items when the + new attempt surpasses the previous ones: + + .. code-block:: python + + async def retryable_with_filter(target): + stream_idx = 0 + # reset stream_idx when the stream is retried + def on_error(e): + nonlocal stream_idx + stream_idx = 0 + # build retryable + retryable_gen = AsyncRetry(is_stream=True, ...)(target) + # keep track of what has been yielded out of filter + seen_items = [] + async for item in retryable_gen: + if stream_idx >= len(seen_items): + yield item + seen_items.append(item) + elif item != previous_stream[stream_idx]: + raise ValueError("Stream differs from last attempt")" + stream_idx += 1 + + filter_retry_wrapped = retryable_with_filter(target) + + Args: + predicate (Callable[Exception]): A callable that should return ``True`` + if the given exception is retryable. + initial (float): The minimum amount of time to delay in seconds. This + must be greater than 0. + maximum (float): The maximum amount of time to delay in seconds. + multiplier (float): The multiplier applied to the delay. + timeout (Optional[float]): How long to keep retrying in seconds. + Note: timeout is only checked before initiating a retry, so the target may + run past the timeout value as long as it is healthy. 
+ on_error (Optional[Callable[Exception]]): A function to call while processing + a retryable exception. Any error raised by this function will + *not* be caught. + is_stream (bool): Indicates whether the input function + should be treated as a stream function (i.e. an AsyncGenerator, + or function or coroutine that returns an AsyncIterable). + If True, the iterable will be wrapped with retry logic, and any + failed outputs will restart the stream. If False, only the input + function call itself will be retried. Defaults to False. + To avoid duplicate values, retryable streams should typically be + wrapped in additional filter logic before use. + deadline (float): DEPRECATED use ``timeout`` instead. If set it will + override ``timeout`` parameter. + """ + + def __call__( + self, + func: Callable[..., AsyncIterable[_Y] | Awaitable[AsyncIterable[_Y]]], + on_error: Callable[[Exception], Any] | None = None, + ) -> Callable[_P, Awaitable[AsyncGenerator[_Y, None]]]: + """Wrap a callable with retry behavior. + + Args: + func (Callable): The callable or stream to add retry behavior to. + on_error (Optional[Callable[Exception]]): If given, the + on_error callback will be called with each retryable exception + raised by the wrapped function. Any error raised by this + function will *not* be caught. If on_error was specified in the + constructor, this value will be ignored. + + Returns: + Callable: A callable that will invoke ``func`` with retry + behavior. 
+ """ + if self._on_error is not None: + on_error = self._on_error + + @functools.wraps(func) + async def retry_wrapped_func( + *args: _P.args, **kwargs: _P.kwargs + ) -> AsyncGenerator[_Y, None]: + """A wrapper that calls target function with retry.""" + sleep_generator = exponential_sleep_generator( + self._initial, self._maximum, multiplier=self._multiplier + ) + return retry_target_stream( + func, + self._predicate, + sleep_generator, + self._timeout, + on_error, + init_args=args, + init_kwargs=kwargs, + ) + + return retry_wrapped_func diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_unary.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..d5dff663f2963390a0d2436cb1db486c074de26a --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_unary.py @@ -0,0 +1,301 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers for retrying functions with exponential back-off. + +The :class:`Retry` decorator can be used to retry functions that raise +exceptions using exponential backoff. Because a exponential sleep algorithm is +used, the retry is limited by a `timeout`. The timeout determines the window +in which retries will be attempted. 
This is used instead of total number of retries +because it is difficult to ascertain the amount of time a function can block +when using total number of retries and exponential backoff. + +By default, this decorator will retry transient +API errors (see :func:`if_transient_error`). For example: + +.. code-block:: python + + @retry.Retry() + def call_flaky_rpc(): + return client.flaky_rpc() + + # Will retry flaky_rpc() if it raises transient API errors. + result = call_flaky_rpc() + +You can pass a custom predicate to retry on different exceptions, such as +waiting for an eventually consistent item to be available: + +.. code-block:: python + + @retry.Retry(predicate=if_exception_type(exceptions.NotFound)) + def check_if_exists(): + return client.does_thing_exist() + + is_available = check_if_exists() + +Some client library methods apply retry automatically. These methods can accept +a ``retry`` parameter that allows you to configure the behavior: + +.. code-block:: python + + my_retry = retry.Retry(timeout=60) + result = client.some_method(retry=my_retry) + +""" + +from __future__ import annotations + +import functools +import sys +import time +import inspect +import warnings +from typing import Any, Callable, Iterable, TypeVar, TYPE_CHECKING + +from google.api_core.retry.retry_base import _BaseRetry +from google.api_core.retry.retry_base import _retry_error_helper +from google.api_core.retry.retry_base import exponential_sleep_generator +from google.api_core.retry.retry_base import build_retry_error +from google.api_core.retry.retry_base import RetryFailureReason + + +if TYPE_CHECKING: + if sys.version_info >= (3, 10): + from typing import ParamSpec + else: + from typing_extensions import ParamSpec + + _P = ParamSpec("_P") # target function call parameters + _R = TypeVar("_R") # target function returned value + +_ASYNC_RETRY_WARNING = "Using the synchronous google.api_core.retry.Retry with asynchronous calls may lead to unexpected results. 
def retry_target(
    target: Callable[[], _R],
    predicate: Callable[[Exception], bool],
    sleep_generator: Iterable[float],
    timeout: float | None = None,
    on_error: Callable[[Exception], None] | None = None,
    exception_factory: Callable[
        [list[Exception], RetryFailureReason, float | None],
        tuple[Exception, Exception | None],
    ] = build_retry_error,
    **kwargs,
):
    """Call ``target`` and retry when a retryable exception is raised.

    This is the lowest-level retry helper; most callers should use the
    higher-level :class:`Retry` decorator instead.

    Args:
        target (Callable): A nullary function to call and retry. Bind
            arguments with `functools.partial` before passing it in.
        predicate (Callable[Exception]): Returns True when an exception
            raised by ``target`` should be retried, False otherwise.
        sleep_generator (Iterable[float]): Infinite iterator of sleep
            durations (seconds) between attempts.
        timeout (Optional[float]): Total retry window in seconds. Only
            checked before starting a retry, so a healthy attempt may run
            past it.
        on_error (Optional[Callable[Exception]]): Called with each retryable
            exception. Errors it raises are *not* caught.
        exception_factory: Builds the terminal exception. Receives the list
            of encountered exceptions, a RetryFailureReason, and the original
            timeout; returns ``(exception, cause_or_None)``. The default
            raises RetryError on timeout, otherwise the last exception seen.
        deadline (float): DEPRECATED alias for ``timeout``; overrides it
            when supplied (kept for backward compatibility).

    Returns:
        Any: the return value of ``target``.

    Raises:
        ValueError: If ``sleep_generator`` stops yielding values.
        Exception: Whatever the exception_factory produces on terminal
            failure; by default google.api_core.RetryError on timeout or the
            last non-retryable/last-seen exception.
    """
    # Honor the deprecated ``deadline`` kwarg as an override for ``timeout``.
    timeout = kwargs.get("deadline", timeout)

    deadline = None if timeout is None else time.monotonic() + timeout
    errors: list[Exception] = []

    for sleep in sleep_generator:
        try:
            result = target()
            # A coroutine here means the caller should have used AsyncRetry;
            # warn, but still hand the awaitable back.
            if inspect.isawaitable(result):
                warnings.warn(_ASYNC_RETRY_WARNING)
            return result
        # This function must deal with broad exceptions by design:
        # the predicate decides what is retryable.
        except Exception as exc:  # pylint: disable=broad-except
            # Shared helper either raises the terminal error or returns,
            # in which case we sleep and try again.
            _retry_error_helper(
                exc,
                deadline,
                sleep,
                errors,
                predicate,
                on_error,
                exception_factory,
                timeout,
            )
            time.sleep(sleep)

    raise ValueError("Sleep generator stopped yielding sleep values.")
For example, if a certain operation has a deadline + "2022-10-18T23:30:52.123Z" it must terminate (successfully or with an + error) by that time, regardless of when it was started or whether it + was started at all. + + Timeout: the maximum duration of time after which a certain operation + must terminate (successfully or with an error). The countdown begins right + after an operation was started. For example, if an operation was started at + 09:24:00 with timeout of 75 seconds, it must terminate no later than + 09:25:15. + + Unfortunately, in the past this class (and the api-core library as a whole) has not + been properly distinguishing the concepts of "timeout" and "deadline", and the + ``deadline`` parameter has meant ``timeout``. That is why + ``deadline`` has been deprecated and ``timeout`` should be used instead. If the + ``deadline`` parameter is set, it will override the ``timeout`` parameter. + In other words, ``retry.deadline`` should be treated as just a deprecated alias for + ``retry.timeout``. + + Said another way, it is safe to assume that this class and the rest of this + library operate in terms of timeouts (not deadlines) unless explicitly + noted the usage of deadline semantics. + + It is also important to + understand the three most common applications of the Timeout concept in the + context of this library. + + Usually the generic Timeout term may stand for one of the following actual + timeouts: RPC Timeout, Retry Timeout, or Polling Timeout. + + RPC Timeout: a value supplied by the client to the server so + that the server side knows the maximum amount of time it is expected to + spend handling that specific RPC. For example, in the case of gRPC transport, + RPC Timeout is represented by setting "grpc-timeout" header in the HTTP2 + request. The `timeout` property of this class normally never represents the + RPC Timeout as it is handled separately by the ``google.api_core.timeout`` + module of this library. 
+ + Retry Timeout: this is the most common meaning of the ``timeout`` property + of this class, and defines how long a certain RPC may be retried in case + the server returns an error. + + Polling Timeout: defines how long the + client side is allowed to call the polling RPC repeatedly to check a status of a + long-running operation. Each polling RPC is + expected to succeed (its errors are supposed to be handled by the retry + logic). The decision as to whether a new polling attempt needs to be made is based + not on the RPC status code but on the status of the returned + status of an operation. In other words: we will poll a long-running operation until + the operation is done or the polling timeout expires. Each poll will inform us of + the status of the operation. The poll consists of an RPC to the server that may + itself be retried as per the poll-specific retry settings in case of errors. The + operation-level retry settings do NOT apply to polling-RPC retries. + + With the actual timeout types being defined above, the client libraries + often refer to just Timeout without clarifying which type specifically + that is. In that case the actual timeout type (sometimes also referred to as + Logical Timeout) can be determined from the context. If it is a unary rpc + call (i.e. a regular one) Timeout usually stands for the RPC Timeout (if + provided directly as a standalone value) or Retry Timeout (if provided as + ``retry.timeout`` property of the unary RPC's retry config). For + ``Operation`` or ``PollingFuture`` in general Timeout stands for + Polling Timeout. + + Args: + predicate (Callable[Exception]): A callable that should return ``True`` + if the given exception is retryable. + initial (float): The minimum amount of time to delay in seconds. This + must be greater than 0. + maximum (float): The maximum amount of time to delay in seconds. + multiplier (float): The multiplier applied to the delay. 
+ timeout (Optional[float]): How long to keep retrying, in seconds. + Note: timeout is only checked before initiating a retry, so the target may + run past the timeout value as long as it is healthy. + on_error (Callable[Exception]): A function to call while processing + a retryable exception. Any error raised by this function will + *not* be caught. + deadline (float): DEPRECATED: use `timeout` instead. For backward + compatibility, if specified it will override the ``timeout`` parameter. + """ + + def __call__( + self, + func: Callable[_P, _R], + on_error: Callable[[Exception], Any] | None = None, + ) -> Callable[_P, _R]: + """Wrap a callable with retry behavior. + + Args: + func (Callable): The callable to add retry behavior to. + on_error (Optional[Callable[Exception]]): If given, the + on_error callback will be called with each retryable exception + raised by the wrapped function. Any error raised by this + function will *not* be caught. If on_error was specified in the + constructor, this value will be ignored. + + Returns: + Callable: A callable that will invoke ``func`` with retry + behavior. 
+ """ + if self._on_error is not None: + on_error = self._on_error + + @functools.wraps(func) + def retry_wrapped_func(*args: _P.args, **kwargs: _P.kwargs) -> _R: + """A wrapper that calls target function with retry.""" + target = functools.partial(func, *args, **kwargs) + sleep_generator = exponential_sleep_generator( + self._initial, self._maximum, multiplier=self._multiplier + ) + return retry_target( + target, + self._predicate, + sleep_generator, + timeout=self._timeout, + on_error=on_error, + ) + + return retry_wrapped_func diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_unary_async.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_unary_async.py new file mode 100644 index 0000000000000000000000000000000000000000..e76a37bb089d47d6f526f7dcf7f454ea0621ed1a --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry/retry_unary_async.py @@ -0,0 +1,238 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers for retrying coroutine functions with exponential back-off. + +The :class:`AsyncRetry` decorator shares most functionality and behavior with +:class:`Retry`, but supports coroutine functions. Please refer to description +of :class:`Retry` for more details. + +By default, this decorator will retry transient +API errors (see :func:`if_transient_error`). For example: + +.. 
code-block:: python + + @retry_async.AsyncRetry() + async def call_flaky_rpc(): + return await client.flaky_rpc() + + # Will retry flaky_rpc() if it raises transient API errors. + result = await call_flaky_rpc() + +You can pass a custom predicate to retry on different exceptions, such as +waiting for an eventually consistent item to be available: + +.. code-block:: python + + @retry_async.AsyncRetry(predicate=retry_async.if_exception_type(exceptions.NotFound)) + async def check_if_exists(): + return await client.does_thing_exist() + + is_available = await check_if_exists() + +Some client library methods apply retry automatically. These methods can accept +a ``retry`` parameter that allows you to configure the behavior: + +.. code-block:: python + + my_retry = retry_async.AsyncRetry(timeout=60) + result = await client.some_method(retry=my_retry) + +""" + +from __future__ import annotations + +import asyncio +import time +import functools +from typing import ( + Awaitable, + Any, + Callable, + Iterable, + TypeVar, + TYPE_CHECKING, +) + +from google.api_core.retry.retry_base import _BaseRetry +from google.api_core.retry.retry_base import _retry_error_helper +from google.api_core.retry.retry_base import exponential_sleep_generator +from google.api_core.retry.retry_base import build_retry_error +from google.api_core.retry.retry_base import RetryFailureReason + +# for backwards compatibility, expose helpers in this module +from google.api_core.retry.retry_base import if_exception_type # noqa +from google.api_core.retry.retry_base import if_transient_error # noqa + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 10): + from typing import ParamSpec + else: + from typing_extensions import ParamSpec + + _P = ParamSpec("_P") # target function call parameters + _R = TypeVar("_R") # target function returned value + +_DEFAULT_INITIAL_DELAY = 1.0 # seconds +_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds +_DEFAULT_DELAY_MULTIPLIER = 2.0 +_DEFAULT_DEADLINE = 60.0 * 2.0 # 
async def retry_target(
    target: Callable[[], Awaitable[_R]],
    predicate: Callable[[Exception], bool],
    sleep_generator: Iterable[float],
    timeout: float | None = None,
    on_error: Callable[[Exception], None] | None = None,
    exception_factory: Callable[
        [list[Exception], RetryFailureReason, float | None],
        tuple[Exception, Exception | None],
    ] = build_retry_error,
    **kwargs,
):
    """Await a coroutine produced by ``target`` and retry if it fails.

    This is the lowest-level async retry helper; most callers should use the
    higher-level :class:`AsyncRetry` decorator instead.

    Args:
        target (Callable[[], Any]): A nullary function returning an awaitable.
            Bind arguments with `functools.partial` before passing it in.
        predicate (Callable[Exception]): Returns True when an exception
            raised by ``target`` should be retried, False otherwise.
        sleep_generator (Iterable[float]): Infinite iterator of sleep
            durations (seconds) between attempts.
        timeout (Optional[float]): Total retry window in seconds. Only
            checked before starting a retry, so a healthy attempt may run
            past it.
        on_error (Optional[Callable[Exception]]): Called with each retryable
            exception. Errors it raises are *not* caught.
        exception_factory: Builds the terminal exception. Receives the list
            of encountered exceptions, a RetryFailureReason, and the original
            timeout; returns ``(exception, cause_or_None)``. The default
            raises RetryError on timeout, otherwise the last exception seen.
        deadline (float): DEPRECATED alias for ``timeout``; overrides it
            when supplied (kept for backward compatibility).

    Returns:
        Any: the awaited result of ``target``.

    Raises:
        ValueError: If ``sleep_generator`` stops yielding values.
        Exception: Whatever the exception_factory produces on terminal
            failure; by default google.api_core.RetryError on timeout or the
            last non-retryable/last-seen exception.
    """
    # Honor the deprecated ``deadline`` kwarg as an override for ``timeout``.
    timeout = kwargs.get("deadline", timeout)

    deadline = None if timeout is None else time.monotonic() + timeout
    errors: list[Exception] = []

    for sleep in sleep_generator:
        try:
            return await target()
        # This function must deal with broad exceptions by design:
        # the predicate decides what is retryable.
        except Exception as exc:  # pylint: disable=broad-except
            # Shared helper either raises the terminal error or returns,
            # in which case we sleep (without blocking the loop) and retry.
            _retry_error_helper(
                exc,
                deadline,
                sleep,
                errors,
                predicate,
                on_error,
                exception_factory,
                timeout,
            )
            await asyncio.sleep(sleep)

    raise ValueError("Sleep generator stopped yielding sleep values.")
+ Note: timeout is only checked before initiating a retry, so the target may + run past the timeout value as long as it is healthy. + on_error (Optional[Callable[Exception]]): A function to call while processing + a retryable exception. Any error raised by this function will + *not* be caught. + deadline (float): DEPRECATED use ``timeout`` instead. If set it will + override ``timeout`` parameter. + """ + + def __call__( + self, + func: Callable[..., Awaitable[_R]], + on_error: Callable[[Exception], Any] | None = None, + ) -> Callable[_P, Awaitable[_R]]: + """Wrap a callable with retry behavior. + + Args: + func (Callable): The callable or stream to add retry behavior to. + on_error (Optional[Callable[Exception]]): If given, the + on_error callback will be called with each retryable exception + raised by the wrapped function. Any error raised by this + function will *not* be caught. If on_error was specified in the + constructor, this value will be ignored. + + Returns: + Callable: A callable that will invoke ``func`` with retry + behavior. 
+ """ + if self._on_error is not None: + on_error = self._on_error + + @functools.wraps(func) + async def retry_wrapped_func(*args: _P.args, **kwargs: _P.kwargs) -> _R: + """A wrapper that calls target function with retry.""" + sleep_generator = exponential_sleep_generator( + self._initial, self._maximum, multiplier=self._multiplier + ) + return await retry_target( + functools.partial(func, *args, **kwargs), + predicate=self._predicate, + sleep_generator=sleep_generator, + timeout=self._timeout, + on_error=on_error, + ) + + return retry_wrapped_func diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry_async.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry_async.py new file mode 100644 index 0000000000000000000000000000000000000000..90a2d5adf7c42b5c61a91f2548bc56503d9b604d --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/retry_async.py @@ -0,0 +1,34 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# The following imports are for backwards compatibility with https://github.com/googleapis/python-api-core/blob/4d7d2edee2c108d43deb151e6e0fdceb56b73275/google/api_core/retry_async.py +# +# TODO: Revert these imports on the next major version release (https://github.com/googleapis/python-api-core/issues/576) +from google.api_core import datetime_helpers # noqa: F401 +from google.api_core import exceptions # noqa: F401 +from google.api_core.retry import exponential_sleep_generator # noqa: F401 +from google.api_core.retry import if_exception_type # noqa: F401 +from google.api_core.retry import if_transient_error # noqa: F401 +from google.api_core.retry.retry_unary_async import AsyncRetry +from google.api_core.retry.retry_unary_async import retry_target + +__all__ = ( + "AsyncRetry", + "datetime_helpers", + "exceptions", + "exponential_sleep_generator", + "if_exception_type", + "if_transient_error", + "retry_target", +) diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/timeout.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/timeout.py new file mode 100644 index 0000000000000000000000000000000000000000..55b195e90b4781c43882a445f28930e35eb5eb1b --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/timeout.py @@ -0,0 +1,294 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Decorators for applying timeout arguments to functions. 
+ +These decorators are used to wrap API methods to apply either a +Deadline-dependent (recommended), constant (DEPRECATED) or exponential +(DEPRECATED) timeout argument. + +For example, imagine an API method that can take a while to return results, +such as one that might block until a resource is ready: + +.. code-block:: python + + def is_thing_ready(timeout=None): + response = requests.get('https://example.com/is_thing_ready') + response.raise_for_status() + return response.json() + +This module allows a function like this to be wrapped so that timeouts are +automatically determined, for example: + +.. code-block:: python + + timeout_ = timeout.ExponentialTimeout() + is_thing_ready_with_timeout = timeout_(is_thing_ready) + + for n in range(10): + try: + is_thing_ready_with_timeout({'example': 'data'}) + except: + pass + +In this example the first call to ``is_thing_ready`` will have a relatively +small timeout (like 1 second). If the resource is available and the request +completes quickly, the loop exits. But, if the resource isn't yet available +and the request times out, it'll be retried - this time with a larger timeout. + +In the broader context these decorators are typically combined with +:mod:`google.api_core.retry` to implement API methods with a signature that +matches ``api_method(request, timeout=None, retry=None)``. +""" + +from __future__ import unicode_literals + +import datetime +import functools + +from google.api_core import datetime_helpers + +_DEFAULT_INITIAL_TIMEOUT = 5.0 # seconds +_DEFAULT_MAXIMUM_TIMEOUT = 30.0 # seconds +_DEFAULT_TIMEOUT_MULTIPLIER = 2.0 +# If specified, must be in seconds. If none, deadline is not used in the +# timeout calculation. +_DEFAULT_DEADLINE = None + + +class TimeToDeadlineTimeout(object): + """A decorator that decreases timeout set for an RPC based on how much time + has left till its deadline. The deadline is calculated as + ``now + initial_timeout`` when this decorator is first called for an rpc. 
class TimeToDeadlineTimeout(object):
    """A decorator that decreases the timeout passed to an RPC based on how
    much time is left until its deadline. The deadline is computed as
    ``now + initial_timeout`` the first time the decorator is applied.

    In other words, this decorator implements deadline semantics in terms of
    a sequence of decreasing timeouts t0 > t1 > t2 ... tn >= 0.

    Args:
        timeout (Optional[float]): the timeout (in seconds) applied to the
            wrapped function. If `None`, the target function is expected to
            never time out.
    """

    def __init__(self, timeout=None, clock=datetime_helpers.utcnow):
        self._timeout = timeout
        # Injectable clock for testability; must return a datetime.
        self._clock = clock

    def __call__(self, func):
        """Apply the timeout decorator.

        Args:
            func (Callable): The function to apply the timeout argument to.
                This function must accept a timeout keyword argument.

        Returns:
            Callable: The wrapped function.
        """
        # Captured at decoration time: the deadline countdown starts here,
        # not at the first call.
        first_attempt_timestamp = self._clock().timestamp()

        @functools.wraps(func)
        def func_with_timeout(*args, **kwargs):
            """Wrapped function that adds timeout."""

            if self._timeout is not None:
                # All calculations are in seconds.
                now_timestamp = self._clock().timestamp()

                # To avoid usage of nonlocal but still have round timeout
                # numbers for the first attempt (in most cases the only
                # attempt made for an RPC), snap very-close timestamps
                # together.
                if now_timestamp - first_attempt_timestamp < 0.001:
                    now_timestamp = first_attempt_timestamp

                time_since_first_attempt = now_timestamp - first_attempt_timestamp
                remaining_timeout = self._timeout - time_since_first_attempt

                # Although the `deadline` parameter in `google.api_core.retry.Retry`
                # is deprecated, and should be treated the same as the `timeout`,
                # it is still possible for the `deadline` argument in
                # `google.api_core.retry.Retry` to be larger than the `timeout`.
                # See https://github.com/googleapis/python-api-core/issues/654
                # Only positive non-zero timeouts are supported, so revert to
                # the initial timeout for negative or near-zero values.
                if remaining_timeout < 1:
                    remaining_timeout = self._timeout

                kwargs["timeout"] = remaining_timeout

            return func(*args, **kwargs)

        return func_with_timeout

    def __str__(self):
        # BUGFIX: the format template had been stripped to "" and always
        # produced an empty string; restore an informative repr.
        return "<TimeToDeadlineTimeout timeout={}>".format(self._timeout)


class ConstantTimeout(object):
    """A decorator that adds a constant timeout argument.

    DEPRECATED: use ``TimeToDeadlineTimeout`` instead.

    This is effectively equivalent to
    ``functools.partial(func, timeout=timeout)``.

    Args:
        timeout (Optional[float]): the timeout (in seconds) applied to the
            wrapped function. If `None`, the target function is expected to
            never time out.
    """

    def __init__(self, timeout=None):
        self._timeout = timeout

    def __call__(self, func):
        """Apply the timeout decorator.

        Args:
            func (Callable): The function to apply the timeout argument to.
                This function must accept a timeout keyword argument.

        Returns:
            Callable: The wrapped function.
        """

        @functools.wraps(func)
        def func_with_timeout(*args, **kwargs):
            """Wrapped function that adds timeout."""
            kwargs["timeout"] = self._timeout
            return func(*args, **kwargs)

        return func_with_timeout

    def __str__(self):
        # BUGFIX: the format template had been stripped to "" and always
        # produced an empty string; restore an informative repr.
        return "<ConstantTimeout timeout={}>".format(self._timeout)


def _exponential_timeout_generator(initial, maximum, multiplier, deadline):
    """A generator that yields exponential timeout values.

    Args:
        initial (float): The initial timeout.
        maximum (float): The maximum timeout.
        multiplier (float): The multiplier applied to the timeout.
        deadline (float): The overall deadline across all invocations.

    Yields:
        float: A timeout value.
    """
    if deadline is not None:
        deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(
            seconds=deadline
        )
    else:
        # No deadline: use the farthest representable instant so the
        # remaining-time term never constrains the yielded value.
        deadline_datetime = datetime.datetime.max

    timeout = initial
    while True:
        now = datetime_helpers.utcnow()
        yield min(
            # The calculated timeout based on invocations.
            timeout,
            # The set maximum timeout.
            maximum,
            # The remaining time before the deadline is reached.
            float((deadline_datetime - now).seconds),
        )
        timeout = timeout * multiplier


class ExponentialTimeout(object):
    """A decorator that adds an exponentially increasing timeout argument.

    DEPRECATED: the concept of incrementing timeout exponentially has been
    deprecated. Use ``TimeToDeadlineTimeout`` instead.

    This is useful if a function is called multiple times. Each time the
    function is called this decorator will calculate a new timeout parameter
    based on the number of times the function has been called.

    Args:
        initial (float): The initial timeout to pass.
        maximum (float): The maximum timeout for any one call.
        multiplier (float): The multiplier applied to the timeout for each
            invocation.
        deadline (Optional[float]): The overall deadline across all
            invocations. This is used to prevent a very large calculated
            timeout from pushing the overall execution time over the deadline.
            This is especially useful in conjunction with
            :mod:`google.api_core.retry`. If ``None``, the timeouts will not
            be adjusted to accommodate an overall deadline.
    """

    def __init__(
        self,
        initial=_DEFAULT_INITIAL_TIMEOUT,
        maximum=_DEFAULT_MAXIMUM_TIMEOUT,
        multiplier=_DEFAULT_TIMEOUT_MULTIPLIER,
        deadline=_DEFAULT_DEADLINE,
    ):
        self._initial = initial
        self._maximum = maximum
        self._multiplier = multiplier
        self._deadline = deadline

    def with_deadline(self, deadline):
        """Return a copy of this timeout with the given deadline.

        Args:
            deadline (float): The overall deadline across all invocations.

        Returns:
            ExponentialTimeout: A new instance with the given deadline.
        """
        return ExponentialTimeout(
            initial=self._initial,
            maximum=self._maximum,
            multiplier=self._multiplier,
            deadline=deadline,
        )

    def __call__(self, func):
        """Apply the timeout decorator.

        Args:
            func (Callable): The function to apply the timeout argument to.
                This function must accept a timeout keyword argument.

        Returns:
            Callable: The wrapped function.
        """
        # One shared generator per decoration: successive calls get
        # successively larger timeouts.
        timeouts = _exponential_timeout_generator(
            self._initial, self._maximum, self._multiplier, self._deadline
        )

        @functools.wraps(func)
        def func_with_timeout(*args, **kwargs):
            """Wrapped function that adds timeout."""
            kwargs["timeout"] = next(timeouts)
            return func(*args, **kwargs)

        return func_with_timeout

    def __str__(self):
        # BUGFIX: the format template had been stripped to "" and always
        # produced an empty string; restore an informative repr.
        return (
            "<ExponentialTimeout initial={}, maximum={}, "
            "multiplier={}, deadline={}>".format(
                self._initial, self._maximum, self._multiplier, self._deadline
            )
        )
+ super().__init__(message) + + +class UniverseMismatchError(ValueError): + def __init__(self, client_universe, credentials_universe): + message = ( + f"The configured universe domain ({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{DEFAULT_UNIVERSE}` is the default." + ) + super().__init__(message) + + +def determine_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] +) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the + "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise EmptyUniverseError + return universe_domain + + +def compare_domains(client_universe: str, credentials: Any) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials Any: The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. 
+ """ + credentials_universe = getattr(credentials, "universe_domain", DEFAULT_UNIVERSE) + + if client_universe != credentials_universe: + raise UniverseMismatchError(client_universe, credentials_universe) + return True diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/api_core/version_header.py b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/version_header.py new file mode 100644 index 0000000000000000000000000000000000000000..cf1972aca42ed634096179c97b904564ba550924 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/api_core/version_header.py @@ -0,0 +1,29 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +API_VERSION_METADATA_KEY = "x-goog-api-version" + + +def to_api_version_header(version_identifier): + """Returns data for the API Version header for the given `version_identifier`. + + Args: + version_identifier (str): The version identifier to be used in the + tuple returned. + + Returns: + Tuple(str, str): A tuple containing the API Version metadata key and + value. 
+ """ + return (API_VERSION_METADATA_KEY, version_identifier) diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/__init__.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9223e9f345584641a615a1cd94f12272aafb3e79 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/duration_pb2.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/duration_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eeba99938ab43c66ecc650dda2837b1729051f9 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/duration_pb2.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/field_mask_pb2.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/field_mask_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88f8af4cb269fa0a05eb5b82fe3dd90688442f32 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/field_mask_pb2.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/runtime_version.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/runtime_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68c3873ee4ac6feb0e9329709fd7c6d7206322c7 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/runtime_version.cpython-310.pyc differ diff --git 
a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/type_pb2.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/type_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05da3486ea52db5c744c600350528cd8a291ed75 Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/__pycache__/type_pb2.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/__init__.py b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/__pycache__/__init__.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0a8a31cbfe4ff1b782f12caeded322b3782e93a Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/cpp_message.py b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/cpp_message.py new file mode 100644 index 0000000000000000000000000000000000000000..623b52fbffc99b9ac0f2401776428bf063ffb7a1 --- /dev/null +++ b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/pyext/cpp_message.py @@ -0,0 +1,49 @@ +# Protocol Buffers - Google's data interchange format +# Copyright 2008 Google Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +"""Protocol message implementation hooks for C++ implementation. 
+ +Contains helper functions used to create protocol message classes from +Descriptor objects at runtime backed by the protocol buffer C++ API. +""" + +__author__ = 'tibell@google.com (Johan Tibell)' + +from google.protobuf.internal import api_implementation + + +# pylint: disable=protected-access +_message = api_implementation._c_module +# TODO: Remove this import after fix api_implementation +if _message is None: + from google.protobuf.pyext import _message + + +class GeneratedProtocolMessageType(_message.MessageMeta): + + """Metaclass for protocol message classes created at runtime from Descriptors. + + The protocol compiler currently uses this metaclass to create protocol + message classes at runtime. Clients can also manually create their own + classes at runtime, as in this example: + + mydescriptor = Descriptor(.....) + factory = symbol_database.Default() + factory.pool.AddDescriptor(mydescriptor) + MyProtoClass = factory.GetPrototype(mydescriptor) + myproto_instance = MyProtoClass() + myproto.foo_field = 23 + ... + + The above example will not work for nested types. If you wish to include them, + use reflection.MakeClass() instead of manually instantiating the class in + order to create the appropriate class structure. + """ + + # Must be consistent with the protocol-compiler code in + # proto2/compiler/internal/generator.*. 
+ _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/testdata/__init__.py b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/testdata/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/testdata/__pycache__/__init__.cpython-310.pyc b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/testdata/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c01d2ee511c3cf589a6ec08840f88f76c793c4b Binary files /dev/null and b/evalkit_tf433/lib/python3.10/site-packages/google/protobuf/testdata/__pycache__/__init__.cpython-310.pyc differ