code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def transform_conversation(self, example: dict) -> Conversation: """Transform the example into a Conversation object. Sample "question": "[USER] Can you come up with a joke? [ASSISTANT]" It starts with a [USER] and ends with an [ASSISTANT] role tag. The Assistant response appears in the...
Transform the example into a Conversation object. Sample "question": "[USER] Can you come up with a joke? [ASSISTANT]" It starts with a [USER] and ends with an [ASSISTANT] role tag. The Assistant response appears in the "answer" field.
transform_conversation
python
oumi-ai/oumi
src/oumi/datasets/vision_language/pixmo_cap_qa.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/datasets/vision_language/pixmo_cap_qa.py
Apache-2.0
def transform_conversation(self, example: dict[str, Any]) -> Conversation: """Transform raw data into a conversation with images.""" for required_key in ("images", "texts"): if required_key not in example: raise ValueError( f"Example doesn't contain '{requ...
Transform raw data into a conversation with images.
transform_conversation
python
oumi-ai/oumi
src/oumi/datasets/vision_language/the_cauldron.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/datasets/vision_language/the_cauldron.py
Apache-2.0
def __init__( self, dataset_path: Optional[Union[str, Path]] = None, data: Optional[list] = None, **kwargs, ): """Initializes a new instance of the VLJsonlinesDataset class.""" if dataset_path is not None and data is not None: raise ValueError( ...
Initializes a new instance of the VLJsonlinesDataset class.
__init__
python
oumi-ai/oumi
src/oumi/datasets/vision_language/vision_jsonlines.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/datasets/vision_language/vision_jsonlines.py
Apache-2.0
def transform_conversation(self, example: dict) -> Conversation: """Transform a single conversation example into a Conversation object.""" input_text = self._process_text_value(example["question"]) output_text = self._process_text_value(example["multiple_choice_answer"]) messages = [ ...
Transform a single conversation example into a Conversation object.
transform_conversation
python
oumi-ai/oumi
src/oumi/datasets/vision_language/vqav2_small.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/datasets/vision_language/vqav2_small.py
Apache-2.0
def _extract_json(response: str) -> Optional[dict]: r"""Returns the json answer extracted from ```json ...```, or None otherwise.""" logger.info(f"response: {response}") # re.DOTALL lets '.' match newlines. Most LLMs use newlines in their JSON outputs. regex_result = re.findall("```json(.*)```", respons...
Returns the json answer extracted from ```json ...```, or None otherwise.
_extract_json
python
oumi-ai/oumi
src/oumi/evaluation/registry/berry_bench_task.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluation/registry/berry_bench_task.py
Apache-2.0
def _extract_prediction(response: str) -> Optional[int]: r"""Returns the numeric answer extracted from `\boxed{...}`, or None otherwise.""" regex_result = re.findall(r"\\boxed\{([-+]?\d+)\}", response) if not regex_result or len(regex_result) != 1: return None number_str = regex_result[0] # ...
Returns the numeric answer extracted from `\boxed{...}`, or None otherwise.
_extract_prediction
python
oumi-ai/oumi
src/oumi/evaluation/registry/count_letters_task.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluation/registry/count_letters_task.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to an Anthropic API input. This method transforms an Oumi Conversation object int...
Converts a conversation to an Anthropic API input. This method transforms an Oumi Conversation object into a format suitable for the Anthropic API. It handles system messages separately and structures the conversation history as required by Anthropic. See https://docs.anthropic.com/cla...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/anthropic_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/anthropic_inference_engine.py
Apache-2.0
def _convert_api_output_to_conversation( self, response: dict[str, Any], original_conversation: Conversation ) -> Conversation: """Converts an Anthropic API response to a conversation.""" new_message = Message( content=response[_CONTENT_KEY][0]["text"], role=Role.ASSI...
Converts an Anthropic API response to a conversation.
_convert_api_output_to_conversation
python
oumi-ai/oumi
src/oumi/inference/anthropic_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/anthropic_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "max_new_tokens", "stop_strings", "temperature", "top_p", }
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/anthropic_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/anthropic_inference_engine.py
Apache-2.0
def __init__( self, model_params: ModelParams, *, generation_params: Optional[GenerationParams] = None, remote_params: Optional[RemoteParams] = None, project_id_env_key: Optional[str] = None, region_env_key: Optional[str] = None, project_id: Optional[str] ...
Initializes the inference Engine. Args: model_params: The model parameters to use for inference. generation_params: The generation parameters to use for inference. remote_params: The remote parameters to use for inference. project_id_env_key: The environment vari...
__init__
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def _get_api_key(self, remote_params: RemoteParams) -> str: """Gets the authentication token for GCP.""" try: from google.auth import default # pyright: ignore[reportMissingImports] from google.auth.transport.requests import ( # pyright: ignore[reportMissingImports] ...
Gets the authentication token for GCP.
_get_api_key
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def _get_request_headers( self, remote_params: Optional[RemoteParams] ) -> dict[str, str]: """Gets the request headers for GCP.""" if not remote_params: raise ValueError("Remote params are required for GCP inference.") headers = { "Authorization": f"Bearer {s...
Gets the request headers for GCP.
_get_request_headers
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to an OpenAI input. Documentation: https://cloud.google.com/vertex-ai/generative-...
Converts a conversation to an OpenAI input. Documentation: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/call-vertex-using-openai-library Args: conversation: The conversation to convert. generation_params: Parameters for generation during inference. ...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "guided_decoding", "logit_bias", "max_new_tokens", "seed", "stop_strings", "temperature", "top_...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def _convert_guided_decoding_config_to_api_input( guided_config: GuidedDecodingParams, ) -> dict: """Converts a guided decoding configuration to an API input.""" if guided_config.json is None: raise ValueError( "Only JSON schema guided decoding is supported, got '%s'", guided...
Converts a guided decoding configuration to an API input.
_convert_guided_decoding_config_to_api_input
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def _replace_refs_in_schema(schema: dict) -> dict: """Replace $ref references in a JSON schema with their actual definitions. Args: schema: The JSON schema dictionary Returns: dict: Schema with all references replaced by their definitions and $defs removed """ def _get_ref_value(r...
Replace $ref references in a JSON schema with their actual definitions. Args: schema: The JSON schema dictionary Returns: dict: Schema with all references replaced by their definitions and $defs removed
_replace_refs_in_schema
python
oumi-ai/oumi
src/oumi/inference/gcp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gcp_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to an Gemini API input. Documentation: https://ai.google.dev/docs Args: ...
Converts a conversation to an Gemini API input. Documentation: https://ai.google.dev/docs Args: conversation: The conversation to convert. generation_params: Parameters for generation during inference. model_params: Model parameters to use during inference. ...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/gemini_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gemini_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "guided_decoding", "max_new_tokens", "stop_strings", "temperature", "top_p", }
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/gemini_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gemini_inference_engine.py
Apache-2.0
def infer_batch( self, conversations: list[Conversation], inference_config: dict[str, Any] ) -> str: """Run inference on a batch of conversations. Args: conversations: The batch of conversations to infer on. inference_config: The inference configuration. Ret...
Run inference on a batch of conversations. Args: conversations: The batch of conversations to infer on. inference_config: The inference configuration. Returns: str: The batch ID.
infer_batch
python
oumi-ai/oumi
src/oumi/inference/gemini_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/gemini_inference_engine.py
Apache-2.0
def __init__( self, model_params: ModelParams, *, generation_params: Optional[GenerationParams] = None, ): """Initializes the LlamaCppInferenceEngine. This method sets up the engine for running inference using llama.cpp. It loads the specified model and confi...
Initializes the LlamaCppInferenceEngine. This method sets up the engine for running inference using llama.cpp. It loads the specified model and configures the inference parameters. Documentation: https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_completion...
__init__
python
oumi-ai/oumi
src/oumi/inference/llama_cpp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/llama_cpp_inference_engine.py
Apache-2.0
def _convert_conversation_to_llama_input( self, conversation: Conversation ) -> list[dict[str, str]]: """Converts a conversation to a list of llama.cpp input messages.""" # FIXME Handle multimodal e.g., raise an error. return [ { "content": message.compute...
Converts a conversation to a list of llama.cpp input messages.
_convert_conversation_to_llama_input
python
oumi-ai/oumi
src/oumi/inference/llama_cpp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/llama_cpp_inference_engine.py
Apache-2.0
def _infer( self, input: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference on the provided input using llama.cpp. Args: input: A list of conversations to run inference on. Each...
Runs model inference on the provided input using llama.cpp. Args: input: A list of conversations to run inference on. Each conversation should contain at least one message. inference_config: Parameters for inference. Returns: List[Conversation]: A li...
_infer
python
oumi-ai/oumi
src/oumi/inference/llama_cpp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/llama_cpp_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "frequency_penalty", "logit_bias", "max_new_tokens", "min_p", "presence_penalty", "stop_strings", ...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/llama_cpp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/llama_cpp_inference_engine.py
Apache-2.0
def infer_online( self, input: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for ...
Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. Returns: List[Conversation]: Inference output.
infer_online
python
oumi-ai/oumi
src/oumi/inference/llama_cpp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/llama_cpp_inference_engine.py
Apache-2.0
def infer_from_file( self, input_filepath: str, inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of in...
Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of input_filepath in the generation_params. Args: input_filepath: Path to the input file containing prompts for generation. inference_co...
infer_from_file
python
oumi-ai/oumi
src/oumi/inference/llama_cpp_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/llama_cpp_inference_engine.py
Apache-2.0
def __init__( self, model_params: ModelParams, *, generation_params: Optional[GenerationParams] = None, ): """Initializes the inference Engine. Args: model_params: The model parameters to use for inference. generation_params: Parameters for ge...
Initializes the inference Engine. Args: model_params: The model parameters to use for inference. generation_params: Parameters for generation.
__init__
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def _make_batches( self, input: list[Conversation], batch_size: int ) -> list[list[Conversation]]: """Splits the input into batches of the specified size. Args: input: A list of text prompts. batch_size: The number of sequences to generate in parallel. Retur...
Splits the input into batches of the specified size. Args: input: A list of text prompts. batch_size: The number of sequences to generate in parallel. Returns: List[List[str]]: A list of batches of text prompts.
_make_batches
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def _update_stop_criteria( self, generation_params: GenerationParams ) -> GenerationParams: """Updates the stop tokens/strings in the generation params, if needed. Args: generation_params: Parameters for generation during inference. Returns: GenerationParams...
Updates the stop tokens/strings in the generation params, if needed. Args: generation_params: Parameters for generation during inference. Returns: GenerationParams: Updated generation params. Note: model.generate accepts both `stop_strings` and `stop_token_...
_update_stop_criteria
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def _infer( self, input: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs batch inference for a model using the provided configuration. Args: input: A list of conversations to run inference on. i...
Runs batch inference for a model using the provided configuration. Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. Returns: object: A list of model responses of shape (num_batches, batch_size).
_infer
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def infer_online( self, input: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for ...
Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. Returns: List[Conversation]: Inference output.
infer_online
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def infer_from_file( self, input_filepath: str, inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of in...
Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of input_filepath in the generation_params. Args: input_filepath: Path to the input file containing prompts for generation. inference_co...
infer_from_file
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "batch_size", "exclude_prompt_from_response", "frequency_penalty", "max_new_tokens", "min_p", "presence_pen...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/native_text_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/native_text_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to an OpenAI input. Documentation: https://platform.openai.com/docs/api-reference...
Converts a conversation to an OpenAI input. Documentation: https://platform.openai.com/docs/api-reference/chat/create Args: conversation: The conversation to convert. generation_params: Parameters for generation during inference. model_params: Model parameters to us...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/openai_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/openai_inference_engine.py
Apache-2.0
def from_api_response(cls, response: dict[str, Any]) -> "BatchInfo": """Create BatchInfo from API response dictionary. Args: response: Raw API response dictionary Returns: BatchInfo: Parsed batch information """ return cls( id=response["id"],...
Create BatchInfo from API response dictionary. Args: response: Raw API response dictionary Returns: BatchInfo: Parsed batch information
from_api_response
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def is_terminal(self) -> bool: """Return True if the batch is in a terminal state.""" return self.status in ( BatchStatus.COMPLETED, BatchStatus.FAILED, BatchStatus.EXPIRED, BatchStatus.CANCELLED, )
Return True if the batch is in a terminal state.
is_terminal
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def completion_percentage(self) -> float: """Return the percentage of completed requests.""" return ( (100 * self.completed_requests / self.total_requests) if self.total_requests > 0 else 0.0 )
Return the percentage of completed requests.
completion_percentage
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def __init__( self, model_params: ModelParams, *, generation_params: Optional[GenerationParams] = None, remote_params: Optional[RemoteParams] = None, ): """Initializes the inference Engine. Args: model_params: The model parameters to use for infer...
Initializes the inference Engine. Args: model_params: The model parameters to use for inference. generation_params: Generation parameters to use for inference. remote_params: Remote server params. **kwargs: Additional keyword arguments.
__init__
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to an OpenAI input. Documentation: https://platform.openai.com/docs/api-reference...
Converts a conversation to an OpenAI input. Documentation: https://platform.openai.com/docs/api-reference/chat/create Args: conversation: The conversation to convert. generation_params: Parameters for generation during inference. model_params: Model parameters to us...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def _convert_api_output_to_conversation( self, response: dict[str, Any], original_conversation: Conversation ) -> Conversation: """Converts an API response to a conversation. Args: response: The API response to convert. original_conversation: The original conversatio...
Converts an API response to a conversation. Args: response: The API response to convert. original_conversation: The original conversation. Returns: Conversation: The conversation including the generated response.
_convert_api_output_to_conversation
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _infer( self, input: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference on the provided input. Args: input: A list of conversations to run inference on. inference_config: ...
Runs model inference on the provided input. Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. remote_params: Parameters for running inference against a remote API. Returns: List[Conversation]: Inference...
_infer
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def infer_online( self, input: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> list[Conversation]: """Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for ...
Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. Returns: List[Conversation]: Inference output.
infer_online
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def infer_from_file( self, input_filepath: str, inference_config: Optional[InferenceConfig] = None ) -> list[Conversation]: """Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of input_filepath in t...
Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of input_filepath in the generation_params. Args: input_filepath: Path to the input file containing prompts for generation. ...
infer_from_file
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "frequency_penalty", "guided_decoding", "logit_bias", "max_new_tokens", "min_p", "presence_penalty", ...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def get_file_api_url(self) -> str: """Returns the URL for the file API.""" return str( urllib.parse.urlparse(self._remote_params.api_url) ._replace(path="/v1/files") .geturl() )
Returns the URL for the file API.
get_file_api_url
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def get_batch_api_url(self) -> str: """Returns the URL for the batch API.""" return str( urllib.parse.urlparse(self._remote_params.api_url) ._replace(path="/v1/batches") .geturl() )
Returns the URL for the batch API.
get_batch_api_url
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def infer_batch( self, conversations: list[Conversation], inference_config: Optional[InferenceConfig] = None, ) -> str: """Creates a new batch inference job. Args: conversations: List of conversations to process in batch inference_config: Parameters f...
Creates a new batch inference job. Args: conversations: List of conversations to process in batch inference_config: Parameters for inference Returns: str: The batch job ID
infer_batch
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def get_batch_status( self, batch_id: str, ) -> BatchInfo: """Gets the status of a batch inference job. Args: batch_id: The batch job ID Returns: BatchInfo: Current status of the batch job """ return safe_asyncio_run(self._get_batch_s...
Gets the status of a batch inference job. Args: batch_id: The batch job ID Returns: BatchInfo: Current status of the batch job
get_batch_status
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def get_batch_results( self, batch_id: str, conversations: list[Conversation], ) -> list[Conversation]: """Gets the results of a completed batch job. Args: batch_id: The batch job ID conversations: Original conversations used to create the batch ...
Gets the results of a completed batch job. Args: batch_id: The batch job ID conversations: Original conversations used to create the batch Returns: List[Conversation]: The processed conversations with responses Raises: RuntimeError: If the batch...
get_batch_results
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _upload_batch_file( self, batch_requests: list[dict], ) -> str: """Uploads a JSONL file containing batch requests. Args: batch_requests: List of request objects to include in the batch Returns: str: The uploaded file ID """ ...
Uploads a JSONL file containing batch requests. Args: batch_requests: List of request objects to include in the batch Returns: str: The uploaded file ID
_upload_batch_file
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _create_batch( self, conversations: list[Conversation], generation_params: GenerationParams, model_params: ModelParams, ) -> str: """Creates a new batch job. Args: conversations: List of conversations to process in batch generation_p...
Creates a new batch job. Args: conversations: List of conversations to process in batch generation_params: Generation parameters model_params: Model parameters Returns: str: The batch job ID
_create_batch
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _get_batch_status( self, batch_id: str, ) -> BatchInfo: """Gets the status of a batch job. Args: batch_id: ID of the batch job Returns: BatchInfo: Current status of the batch job """ connector = aiohttp.TCPConnector(limit=se...
Gets the status of a batch job. Args: batch_id: ID of the batch job Returns: BatchInfo: Current status of the batch job
_get_batch_status
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _get_batch_results_with_mapping( self, batch_id: str, conversations: list[Conversation], ) -> list[Conversation]: """Gets the results of a completed batch job and maps them to conversations. Args: batch_id: ID of the batch job conversations:...
Gets the results of a completed batch job and maps them to conversations. Args: batch_id: ID of the batch job conversations: Original conversations used to create the batch Returns: List[Conversation]: The processed conversations with responses Raises: ...
_get_batch_results_with_mapping
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _get_file( self, file_id: str, ) -> FileInfo: """Gets information about a file. Args: file_id: ID of the file remote_params: Remote API parameters Returns: FileInfo: File information """ connector = aiohttp.TCPCo...
Gets information about a file. Args: file_id: ID of the file remote_params: Remote API parameters Returns: FileInfo: File information
_get_file
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _delete_file( self, file_id: str, ) -> bool: """Deletes a file. Args: file_id: ID of the file to delete Returns: bool: True if deletion was successful """ connector = aiohttp....
Deletes a file. Args: file_id: ID of the file to delete Returns: bool: True if deletion was successful
_delete_file
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
async def _download_file( self, file_id: str, ) -> str: """Downloads a file's content. Args: file_id: ID of the file to download Returns: str: The file content """ connector = aiohttp.T...
Downloads a file's content. Args: file_id: ID of the file to download Returns: str: The file content
_download_file
python
oumi-ai/oumi
src/oumi/inference/remote_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "frequency_penalty", "logit_bias", "presence_penalty", "seed", "stop_strings", "stop_token_ids", ...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/remote_vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_vllm_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to an OpenAI input. Documentation: https://platform.openai.com/docs/api-reference...
Converts a conversation to an OpenAI input. Documentation: https://platform.openai.com/docs/api-reference/chat/create Args: conversation: The conversation to convert. generation_params: Parameters for generation during inference. model_params: Model parameters to us...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/remote_vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/remote_vllm_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to a SambaNova API input. This method transforms an Oumi Conversation object into...
Converts a conversation to a SambaNova API input. This method transforms an Oumi Conversation object into a format suitable for the SambaNova API. It handles the conversion of messages and generation parameters according to the API specification. Args: conversation: The Oum...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/sambanova_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sambanova_inference_engine.py
Apache-2.0
def _convert_api_output_to_conversation( self, response: dict[str, Any], original_conversation: Conversation ) -> Conversation: """Converts a SambaNova API response to a conversation. Args: response: The API response to convert. original_conversation: The original co...
Converts a SambaNova API response to a conversation. Args: response: The API response to convert. original_conversation: The original conversation. Returns: Conversation: The conversation including the generated response.
_convert_api_output_to_conversation
python
oumi-ai/oumi
src/oumi/inference/sambanova_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sambanova_inference_engine.py
Apache-2.0
def _get_request_headers(self, remote_params: RemoteParams) -> dict[str, str]: """Get headers for the API request. Args: remote_params: Remote server parameters. Returns: Dict[str, str]: Headers for the API request. """ headers = { "Content-T...
Get headers for the API request. Args: remote_params: Remote server parameters. Returns: Dict[str, str]: Headers for the API request.
_get_request_headers
python
oumi-ai/oumi
src/oumi/inference/sambanova_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sambanova_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "max_new_tokens", "stop_strings", "temperature", "top_p", }
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/sambanova_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sambanova_inference_engine.py
Apache-2.0
def __init__( self, model_params: ModelParams, *, remote_params: RemoteParams | None = None, generation_params: GenerationParams | None = None, ): """Initializes the SGL inference Engine. Args: model_params: The model parameters to use for inferen...
Initializes the SGL inference Engine. Args: model_params: The model parameters to use for inference. remote_params: Remote server params. generation_params: The generation parameters to use for inference.
__init__
python
oumi-ai/oumi
src/oumi/inference/sglang_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sglang_inference_engine.py
Apache-2.0
def _convert_conversation_to_api_input( self, conversation: Conversation, generation_params: GenerationParams, model_params: ModelParams, ) -> dict[str, Any]: """Converts a conversation to SGLang Native API input. See https://sgl-project.github.io/references/sampling...
Converts a conversation to SGLang Native API input. See https://sgl-project.github.io/references/sampling_params.html for details. Args: conversation: The Oumi Conversation object to convert. generation_params: Parameters for text generation. model_params: Ignored. ...
_convert_conversation_to_api_input
python
oumi-ai/oumi
src/oumi/inference/sglang_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sglang_inference_engine.py
Apache-2.0
def _convert_api_output_to_conversation( self, response: dict[str, Any], original_conversation: Conversation ) -> Conversation: """Converts an SGLang Native API response to a conversation.""" new_message = Message( content=response["text"], role=Role.ASSISTANT, ...
Converts an SGLang Native API response to a conversation.
_convert_api_output_to_conversation
python
oumi-ai/oumi
src/oumi/inference/sglang_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sglang_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "frequency_penalty", "guided_decoding", "max_new_tokens", "min_p", "presence_penalty", "stop_strings", ...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/sglang_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/sglang_inference_engine.py
Apache-2.0
def __init__( self, model_params: ModelParams, *, generation_params: GenerationParams | None = None, tensor_parallel_size: int = -1, quantization: str | None = None, enable_prefix_caching: bool = True, gpu_memory_utilization: float = 0.9, enforce_e...
Initializes the inference Engine. Args: model_params: The model parameters to use for inference. generation_params: The generation parameters to use for inference. tensor_parallel_size: The number of tensor parallel processes to use. If set to -1, we will use...
__init__
python
oumi-ai/oumi
src/oumi/inference/vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/vllm_inference_engine.py
Apache-2.0
def _convert_conversation_to_vllm_input( self, conversation: Conversation ) -> list[ChatCompletionMessageParam]: """Converts a conversation to a list of vllm input messages. Args: conversation: The conversation to convert. Returns: List[ChatCompletionMessage...
Converts a conversation to a list of vllm input messages. Args: conversation: The conversation to convert. Returns: List[ChatCompletionMessageParam]: A list of vllm input messages.
_convert_conversation_to_vllm_input
python
oumi-ai/oumi
src/oumi/inference/vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/vllm_inference_engine.py
Apache-2.0
def _infer( self, input: list[Conversation], inference_config: InferenceConfig | None = None, ) -> list[Conversation]: """Runs model inference on the provided input. Documentation: https://docs.vllm.ai/en/stable/dev/sampling_params.html Args: input: A li...
Runs model inference on the provided input. Documentation: https://docs.vllm.ai/en/stable/dev/sampling_params.html Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. Returns: List[Conversation]: Inference o...
_infer
python
oumi-ai/oumi
src/oumi/inference/vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/vllm_inference_engine.py
Apache-2.0
def infer_online( self, input: list[Conversation], inference_config: InferenceConfig | None = None, ) -> list[Conversation]: """Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for inf...
Runs model inference online. Args: input: A list of conversations to run inference on. inference_config: Parameters for inference. Returns: List[Conversation]: Inference output.
infer_online
python
oumi-ai/oumi
src/oumi/inference/vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/vllm_inference_engine.py
Apache-2.0
def infer_from_file( self, input_filepath: str, inference_config: InferenceConfig | None = None, ) -> list[Conversation]: """Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of input...
Runs model inference on inputs in the provided file. This is a convenience method to prevent boilerplate from asserting the existence of input_filepath in the generation_params. Args: input_filepath: Path to the input file containing prompts for generation. ...
infer_from_file
python
oumi-ai/oumi
src/oumi/inference/vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/vllm_inference_engine.py
Apache-2.0
def get_supported_params(self) -> set[str]: """Returns a set of supported generation parameters for this engine.""" return { "frequency_penalty", "guided_decoding", "max_new_tokens", "min_p", "presence_penalty", "stop_strings", ...
Returns a set of supported generation parameters for this engine.
get_supported_params
python
oumi-ai/oumi
src/oumi/inference/vllm_inference_engine.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/inference/vllm_inference_engine.py
Apache-2.0
def from_xml_output(cls, raw_judgement: Optional[str]) -> Optional[Self]: """Parses the judgement from XML-like tags in the raw output. Args: raw_judgement: The raw judgement string to parse. Returns: Optional[Self]: An instance of the class with parsed attributes, ...
Parses the judgement from XML-like tags in the raw output. Args: raw_judgement: The raw judgement string to parse. Returns: Optional[Self]: An instance of the class with parsed attributes, or None if parsing fails.
from_xml_output
python
oumi-ai/oumi
src/oumi/judges/base_judge.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/base_judge.py
Apache-2.0
def fields(self): """Return the fields of the judgement.""" fields = self.model_dump() fields.pop("raw_judgement", None) fields.pop("template", None) fields.pop("role", None) return fields
Return the fields of the judgement.
fields
python
oumi-ai/oumi
src/oumi/judges/base_judge.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/base_judge.py
Apache-2.0
def oumi_v1_xml_claude_sonnet_judge() -> JudgeConfig: """Returns a JudgeConfig for the Oumi v1 XML Anthropic judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses Claude Sonnet as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A ...
Returns a JudgeConfig for the Oumi v1 XML Anthropic judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses Claude Sonnet as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A configuration object for the Oumi v1 XML Anthropic judge. ...
oumi_v1_xml_claude_sonnet_judge
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def oumi_v1_xml_local_judge() -> JudgeConfig: """Returns a JudgeConfig for the Oumi v1 XML local judge. Returns: JudgeConfig: A configuration object for the Oumi v1 XML local judge. Note: This judge uses a local GGUF model file for inference. """ judges_directory = get_oumi_root_di...
Returns a JudgeConfig for the Oumi v1 XML local judge. Returns: JudgeConfig: A configuration object for the Oumi v1 XML local judge. Note: This judge uses a local GGUF model file for inference.
oumi_v1_xml_local_judge
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def oumi_v1_xml_gpt4o_judge() -> JudgeConfig: """Returns a JudgeConfig for the Oumi v1 XML GPT-4 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses GPT-4 as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A configuration object...
Returns a JudgeConfig for the Oumi v1 XML GPT-4 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses GPT-4 as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A configuration object for the Oumi v1 XML GPT-4 judge. Note: ...
oumi_v1_xml_gpt4o_judge
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def oumi_v1_xml_deepseek_r1_judge_hosted_by_deepseek() -> JudgeConfig: """Returns a JudgeConfig for the Oumi v1 XML DeepSeek R1 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses DeepSeek R1 as a judge, with inputs and outputs in XML format. Returns: ...
Returns a JudgeConfig for the Oumi v1 XML DeepSeek R1 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses DeepSeek R1 as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A configuration object for the Oumi v1 XML DeepSeek R1 judge. ...
oumi_v1_xml_deepseek_r1_judge_hosted_by_deepseek
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def oumi_v1_xml_deepseek_r1_judge_hosted_by_sambanova() -> JudgeConfig: """Returns a JudgeConfig for the Oumi v1 XML DeepSeek R1 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses DeepSeek R1 as a judge, with inputs and outputs in XML format. Returns: ...
Returns a JudgeConfig for the Oumi v1 XML DeepSeek R1 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses DeepSeek R1 as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A configuration object for the Oumi v1 XML DeepSeek R1 judge. ...
oumi_v1_xml_deepseek_r1_judge_hosted_by_sambanova
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def oumi_v1_xml_deepseek_r1_judge_hosted_by_together() -> JudgeConfig: """Returns a JudgeConfig for the Oumi v1 XML DeepSeek R1 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses DeepSeek R1 as a judge, with inputs and outputs in XML format. Returns: ...
Returns a JudgeConfig for the Oumi v1 XML DeepSeek R1 judge. This function creates and returns a JudgeConfig object for the Oumi V1 Judge, which uses DeepSeek R1 as a judge, with inputs and outputs in XML format. Returns: JudgeConfig: A configuration object for the Oumi v1 XML DeepSeek R1 judge. ...
oumi_v1_xml_deepseek_r1_judge_hosted_by_together
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def unit_test_judge(): """Tiny judge for unit testing. Do not use this judge for anything serious as it returns random results. """ attribute_path = ( get_oumi_root_directory() / "judges" / "test_judge" / "helpful.json" ) attribute = JudgeAttribute[Union[OumiJudgeInput, OumiJudgeOutput...
Tiny judge for unit testing. Do not use this judge for anything serious as it returns random results.
unit_test_judge
python
oumi-ai/oumi
src/oumi/judges/judge_court.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/judge_court.py
Apache-2.0
def label(self): """Convert the judgement to a boolean or Likert scale label.""" if self.judgement: if self.judgement.isdigit(): return int(self.judgement) try: return str_to_bool(self.judgement) except ValueError: retu...
Convert the judgement to a boolean or Likert scale label.
label
python
oumi-ai/oumi
src/oumi/judges/oumi_judge.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judges/oumi_judge.py
Apache-2.0
def _initialize_new_clouds(self) -> None: """Initializes new clouds. Existing clouds are not re-initialized.""" for name, builder in REGISTRY.get_all(RegistryType.CLOUD).items(): if name not in self._clouds: self._clouds[name] = builder()
Initializes new clouds. Existing clouds are not re-initialized.
_initialize_new_clouds
python
oumi-ai/oumi
src/oumi/launcher/launcher.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/launcher.py
Apache-2.0
def _get_cloud_by_name(self, cloud: str) -> BaseCloud: """Gets the cloud instance for the specified cloud name.""" if cloud not in self._clouds: cloud_builder = REGISTRY.get(cloud, RegistryType.CLOUD) if not cloud_builder: raise ValueError(f"Cloud {cloud} not foun...
Gets the cloud instance for the specified cloud name.
_get_cloud_by_name
python
oumi-ai/oumi
src/oumi/launcher/launcher.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/launcher.py
Apache-2.0
def get_cloud(self, job_or_cloud: Union[JobConfig, str]) -> BaseCloud: """Gets the cloud instance for the specified job.""" if isinstance(job_or_cloud, str): return self._get_cloud_by_name(job_or_cloud) return self._get_cloud_by_name(job_or_cloud.resources.cloud)
Gets the cloud instance for the specified job.
get_cloud
python
oumi-ai/oumi
src/oumi/launcher/launcher.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/launcher.py
Apache-2.0
def run(self, job: JobConfig, cluster_name: str) -> JobStatus: """Runs the specified job on the specified cluster. Args: job: The job configuration. cluster_name: The name of the cluster to run the job on. Returns: Optional[JobStatus]: The status of the job....
Runs the specified job on the specified cluster. Args: job: The job configuration. cluster_name: The name of the cluster to run the job on. Returns: Optional[JobStatus]: The status of the job.
run
python
oumi-ai/oumi
src/oumi/launcher/launcher.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/launcher.py
Apache-2.0
def status( self, cloud: Optional[str] = None, cluster: Optional[str] = None, id: Optional[str] = None, ) -> dict[str, list[JobStatus]]: """Gets the status of all jobs across all clusters. Args: cloud: If specified, filters all jobs to only those on the s...
Gets the status of all jobs across all clusters. Args: cloud: If specified, filters all jobs to only those on the specified cloud. cluster: If specified, filters all jobs to only those on the specified cluster. id: If specified, filters all jobs to only those...
status
python
oumi-ai/oumi
src/oumi/launcher/launcher.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/launcher.py
Apache-2.0
def up( self, job: JobConfig, cluster_name: Optional[str], **kwargs ) -> tuple[BaseCluster, JobStatus]: """Creates a new cluster and starts the specified job on it.""" cloud = self.get_cloud(job) job_status = cloud.up_cluster(job, cluster_name, **kwargs) cluster = cloud.get_c...
Creates a new cluster and starts the specified job on it.
up
python
oumi-ai/oumi
src/oumi/launcher/launcher.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/launcher.py
Apache-2.0
def __init__(self): """Initializes a new instance of the LocalClient class.""" self._mutex = Lock() self._next_job_id = 0 # A mapping of job IDs to their respective job configurations. self._jobs = {} self._running_process = None self._worker = Thread(target=self....
Initializes a new instance of the LocalClient class.
__init__
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def _update_job_status(self, job_id: str, status: _JobState) -> None: """Updates the status of the job. Assumes the mutex is already acquired.""" if job_id not in self._jobs: return self._jobs[job_id].status.status = status.value is_done = status in (_JobState.COMPLETED, _Job...
Updates the status of the job. Assumes the mutex is already acquired.
_update_job_status
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def _worker_run_job(self) -> Optional[_LocalJob]: """Kicks off and returns a new job. Assumes the mutex is already acquired.""" job = self._get_next_job() if job is None: return None env_copy = os.environ.copy() env_copy.update(job.config.envs) # Check if the ...
Kicks off and returns a new job. Assumes the mutex is already acquired.
_worker_run_job
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def _worker_handle_running_job(self, job: _LocalJob) -> None: """Polls and handles the specified job. Acquires the mutex.""" # Return immediately if no job is running. if self._running_process is None: return # Wait for the job to finish. No need to grab the mutex here. ...
Polls and handles the specified job. Acquires the mutex.
_worker_handle_running_job
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def _worker_loop(self): """The main worker loop that runs jobs.""" while True: with self._mutex: # Run the next job if it exists. job = self._worker_run_job() # No job to run, sleep for a bit. if job is None: time.sleep(...
The main worker loop that runs jobs.
_worker_loop
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def _get_next_job(self) -> Optional[_LocalJob]: """Gets the next QUEUED job from the queue.""" queued_jobs = [ job for job in self._jobs.values() if job.status.status == _JobState.QUEUED.value ] if len(queued_jobs) == 0: return None ...
Gets the next QUEUED job from the queue.
_get_next_job
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def submit_job(self, job: JobConfig) -> JobStatus: """Runs the specified job on this cluster.""" with self._mutex: job_id = self._generate_next_job_id() name = job.name if job.name else job_id status = JobStatus( name=name, id=job_id, ...
Runs the specified job on this cluster.
submit_job
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def get_job(self, job_id: str) -> Optional[JobStatus]: """Gets the specified job's status. Args: job_id: The ID of the job to get. Returns: The job status if found, None otherwise. """ job_list = self.list_jobs() for job in job_list: ...
Gets the specified job's status. Args: job_id: The ID of the job to get. Returns: The job status if found, None otherwise.
get_job
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def cancel(self, job_id) -> Optional[JobStatus]: """Cancels the specified job. Args: job_id: The ID of the job to cancel. Returns: The job status if found, None otherwise. """ with self._mutex: ...
Cancels the specified job. Args: job_id: The ID of the job to cancel. Returns: The job status if found, None otherwise.
cancel
python
oumi-ai/oumi
src/oumi/launcher/clients/local_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/local_client.py
Apache-2.0
def _check_connection(user: str): """Checks if the connection is still open.""" ssh_cmd = f"ssh {_CTRL_PATH} -O check {user}@polaris.alcf.anl.gov" try: child = subprocess.run( ssh_cmd, shell=True, capture_output=True, timeout=10, ) except s...
Checks if the connection is still open.
_check_connection
python
oumi-ai/oumi
src/oumi/launcher/clients/polaris_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/polaris_client.py
Apache-2.0
def retry_auth(user_function): """Decorator to ensure auth is fresh before calling a function.""" @functools.wraps(user_function) def wrapper(self, *args, **kwargs): self._refresh_creds() return user_function(self, *args, **kwargs) return wrapper
Decorator to ensure auth is fresh before calling a function.
retry_auth
python
oumi-ai/oumi
src/oumi/launcher/clients/polaris_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/polaris_client.py
Apache-2.0
def _split_status_line(self, line: str, metadata: str) -> JobStatus: """Splits a status line into a JobStatus object. The expected order of job fields is: 0. Job ID 1. User 2. Queue 3. Job Name 4. Session ID 5. Node Count 6. Tasks 7. Requi...
Splits a status line into a JobStatus object. The expected order of job fields is: 0. Job ID 1. User 2. Queue 3. Job Name 4. Session ID 5. Node Count 6. Tasks 7. Required Memory 8. Required Time 9. Status 10. Elapsed Time ...
_split_status_line
python
oumi-ai/oumi
src/oumi/launcher/clients/polaris_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/polaris_client.py
Apache-2.0
def _get_short_job_id(self, job_id: str) -> str: """Gets the short form of the job ID. Polaris Job IDs should be of the form: `2037042.polaris-pbs-01.hsn.cm.polaris.alcf.anl.gov` where the shortened ID is `2037042`. Args: job_id: The job ID to shorten. Retu...
Gets the short form of the job ID. Polaris Job IDs should be of the form: `2037042.polaris-pbs-01.hsn.cm.polaris.alcf.anl.gov` where the shortened ID is `2037042`. Args: job_id: The job ID to shorten. Returns: The short form of the job ID.
_get_short_job_id
python
oumi-ai/oumi
src/oumi/launcher/clients/polaris_client.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/launcher/clients/polaris_client.py
Apache-2.0