code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def _read_line(file_path: str, idx: int) -> str: """Reads a single line from a file as a raw string.""" with open(file_path) as f: for i, line in enumerate(f): if i == idx: return line raise RuntimeError(f"Index out of range: {idx}")
Reads a single line from a file as a raw string.
_read_line
python
oumi-ai/oumi
scripts/examples/batch_inference/bulk_infer.py
https://github.com/oumi-ai/oumi/blob/master/scripts/examples/batch_inference/bulk_infer.py
Apache-2.0
def run_batch(file_path: str) -> str: """Runs batch prediction on the conversations in `file_path`. Args: file_path (str): the path to a jsonl file of conversations. Returns: str: the id of the batch job. """ config = InferenceConfig( model=ModelParams(model_name="gpt-4o-mi...
Runs batch prediction on the conversations in `file_path`. Args: file_path (str): the path to a jsonl file of conversations. Returns: str: the id of the batch job.
run_batch
python
oumi-ai/oumi
scripts/examples/batch_inference/bulk_infer.py
https://github.com/oumi-ai/oumi/blob/master/scripts/examples/batch_inference/bulk_infer.py
Apache-2.0
def _load_dataset(num_examples: Optional[int]) -> TextSftJsonLinesDataset: """Load the facebook/anli dataset, formatted as a classification problem.""" anli_dataset = datasets.load_dataset("facebook/anli", split="test_r3") evaluation_dataset = [] for anli_example in anli_dataset: assert isinstan...
Load the facebook/anli dataset, formatted as a classification problem.
_load_dataset
python
oumi-ai/oumi
scripts/examples/evaluation/custom_evaluation.py
https://github.com/oumi-ai/oumi/blob/master/scripts/examples/evaluation/custom_evaluation.py
Apache-2.0
def _extract_prediction(response: str) -> int: """Converts a response to a label: [0, 1], or -1 if inconclusive.""" is_unsupported = "<|unsupported|>" in response is_supported = "<|supported|>" in response if is_unsupported == is_supported: return -1 return 0 if is_supported else 1
Converts a response to a label: [0, 1], or -1 if inconclusive.
_extract_prediction
python
oumi-ai/oumi
scripts/examples/evaluation/custom_evaluation.py
https://github.com/oumi-ai/oumi/blob/master/scripts/examples/evaluation/custom_evaluation.py
Apache-2.0
def get_new_inputs(source: str, processed: str) -> list[str]:
    """Returns a list of new files in the source directory.

    NOTE: `rglob("*")` matches directories as well as regular files, and
    entries are compared by bare name (parent paths are ignored).
    """
    already_processed = set()
    for entry in Path(processed).rglob("*"):
        already_processed.add(entry.name)
    new_names = {entry.name for entry in Path(source).rglob("*")} - already_processed
    return list(new_names)
Returns a list of new files in the source directory.
get_new_inputs
python
oumi-ai/oumi
scripts/inference/gcp_inference.py
https://github.com/oumi-ai/oumi/blob/master/scripts/inference/gcp_inference.py
Apache-2.0
def main(): """Runs inference on new files in the input directory.""" parallelism = 1 if len(sys.argv) > 1: parallelism = int(sys.argv[1]) os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" print("Checking for new files...") new_files = get_new_inputs(_INPUT_DIR, _OUTPUT_DIR) yaml_...
Runs inference on new files in the input directory.
main
python
oumi-ai/oumi
scripts/inference/gcp_inference.py
https://github.com/oumi-ai/oumi/blob/master/scripts/inference/gcp_inference.py
Apache-2.0
def main() -> None: """Run inference against vLLM model hosted as an OpenAI API.""" openai_api_key = "EMPTY" IP = os.environ["THIS_IP_ADDRESS"] openai_api_base = f"http://{IP}:8000/v1" client = OpenAI( # defaults to os.environ.get("OPENAI_API_KEY") api_key=openai_api_key, bas...
Run inference against vLLM model hosted as an OpenAI API.
main
python
oumi-ai/oumi
scripts/polaris/jobs/python/vllm_inference.py
https://github.com/oumi-ai/oumi/blob/master/scripts/polaris/jobs/python/vllm_inference.py
Apache-2.0
def main(): """Run parallelized inference against a vLLM server.""" locust.log.setup_logging("INFO") random.seed(RANDOM_SEED) IP = os.environ["THIS_IP_ADDRESS"] openai_api_base = f"http://{IP}:8000/v1" client = OpenAI( base_url=openai_api_base, ) models = client.models.list() ...
Run parallelized inference against a vLLM server.
main
python
oumi-ai/oumi
scripts/polaris/jobs/python/vllm_parallel_inference.py
https://github.com/oumi-ai/oumi/blob/master/scripts/polaris/jobs/python/vllm_parallel_inference.py
Apache-2.0
def run_inference(self): """Runs inference by pulling indices from queue shared by all workers.""" global output_queue global input_queue global REQUEST_TIMES global failed_request_counts while True: index = input_queue.get() ...
Runs inference by pulling indices from queue shared by all workers.
run_inference
python
oumi-ai/oumi
scripts/polaris/jobs/python/vllm_parallel_inference.py
https://github.com/oumi-ai/oumi/blob/master/scripts/polaris/jobs/python/vllm_parallel_inference.py
Apache-2.0
def evaluate(config: EvaluationConfig) -> list[dict[str, Any]]: """Evaluates a model using the provided configuration. Args: config: The desired configuration for evaluation. Returns: A list of evaluation results (one for each task). Each evaluation result is a dictionary of metric...
Evaluates a model using the provided configuration. Args: config: The desired configuration for evaluation. Returns: A list of evaluation results (one for each task). Each evaluation result is a dictionary of metric names and their corresponding values.
evaluate
python
oumi-ai/oumi
src/oumi/evaluate.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluate.py
Apache-2.0
def parse_cli(): """Parses command line arguments and return the configuration filename.""" parser = argparse.ArgumentParser() parser.add_argument( "-c", "--config", default=None, help="Path to the configuration file" ) args, arg_list = parser.parse_known_args() return args.config, arg_l...
Parses command line arguments and returns the configuration filename.
parse_cli
python
oumi-ai/oumi
src/oumi/evaluate_async.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluate_async.py
Apache-2.0
def main() -> None: """Main entry point for running aynsc Oumi evals. Evaluation arguments are fetched from the following sources, ordered by decreasing priority: 1. [Optional] Arguments provided as CLI arguments, in dotfile format 2. [Optional] Arguments provided in a yaml config file 3. Defau...
Main entry point for running async Oumi evals. Evaluation arguments are fetched from the following sources, ordered by decreasing priority: 1. [Optional] Arguments provided as CLI arguments, in dotfile format 2. [Optional] Arguments provided in a yaml config file 3. Default arguments values defined...
main
python
oumi-ai/oumi
src/oumi/evaluate_async.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluate_async.py
Apache-2.0
def _get_checkpoints(checkpoint_dir: Path) -> list[Path]: """Returns all checkpoints in the target directory.""" # Modified from HF's transformers.trainer_utils.get_last_checkpoint(). re_checkpoint = re.compile(r"^" + _PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") return [ path for path in checkp...
Returns all checkpoints in the target directory.
_get_checkpoints
python
oumi-ai/oumi
src/oumi/evaluate_async.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluate_async.py
Apache-2.0
def evaluate_async(config: AsyncEvaluationConfig) -> None: """Runs an async evaluation for a model using the provided configuration. Overview: This is a utility method for running evaluations iteratively over a series of checkpoints. This method can be run in parallel with a training job to ...
Runs an async evaluation for a model using the provided configuration. Overview: This is a utility method for running evaluations iteratively over a series of checkpoints. This method can be run in parallel with a training job to compute metrics per checkpoint without wasting valuable time ...
evaluate_async
python
oumi-ai/oumi
src/oumi/evaluate_async.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/evaluate_async.py
Apache-2.0
def _get_engine(config: InferenceConfig) -> BaseInferenceEngine: """Returns the inference engine based on the provided config.""" if config.engine is None: logger.warning( "No inference engine specified. Using the default 'native' engine." ) return build_inference_engine( ...
Returns the inference engine based on the provided config.
_get_engine
python
oumi-ai/oumi
src/oumi/infer.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/infer.py
Apache-2.0
def infer_interactive( config: InferenceConfig, *, input_image_bytes: Optional[list[bytes]] = None, system_prompt: Optional[str] = None, ) -> None: """Interactively provide the model response for a user-provided input.""" # Create engine up front to avoid reinitializing it for each input. in...
Interactively provide the model response for a user-provided input.
infer_interactive
python
oumi-ai/oumi
src/oumi/infer.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/infer.py
Apache-2.0
def infer( config: InferenceConfig, inputs: Optional[list[str]] = None, inference_engine: Optional[BaseInferenceEngine] = None, *, input_image_bytes: Optional[list[bytes]] = None, system_prompt: Optional[str] = None, ) -> list[Conversation]: """Runs batch inference for a model using the prov...
Runs batch inference for a model using the provided configuration. Args: config: The configuration to use for inference. inputs: A list of inputs for inference. inference_engine: The engine to use for inference. If unspecified, the engine will be inferred from `config`. ...
infer
python
oumi-ai/oumi
src/oumi/infer.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/infer.py
Apache-2.0
def judge_dataset(config: JudgeConfig, dataset: BaseSftDataset) -> list[dict[str, Any]]: """Judge a dataset. This function evaluates a given dataset using a specified Judge configuration. The function performs the following steps: 1. Initializes the Judge with the provided configuration. ...
Judge a dataset. This function evaluates a given dataset using a specified Judge configuration. The function performs the following steps: 1. Initializes the Judge with the provided configuration. 2. Iterates through the dataset to extract conversation inputs. 3. Uses the Judge to eva...
judge_dataset
python
oumi-ai/oumi
src/oumi/judge.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judge.py
Apache-2.0
def judge_conversations( config: JudgeConfig, judge_inputs: list[Conversation] ) -> list[dict[str, Any]]: """Judge a list of conversations. This function evaluates a list of conversations using the specified Judge. The function performs the following steps: 1. Initializes the Judge with the p...
Judge a list of conversations. This function evaluates a list of conversations using the specified Judge. The function performs the following steps: 1. Initializes the Judge with the provided configuration. 2. Uses the Judge to evaluate each conversation input. 3. Collects and returns...
judge_conversations
python
oumi-ai/oumi
src/oumi/judge.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/judge.py
Apache-2.0
def _find_checkpoint_to_resume_from( resume_from_checkpoint: Optional[str], try_resume_from_last_checkpoint: bool, output_dir: str, ) -> Optional[str]: """Finds and returns the last checkpoint path to be passed to Trainer.""" checkpoint_path = None if resume_from_checkpoint: checkpoint_p...
Finds and returns the last checkpoint path to be passed to Trainer.
_find_checkpoint_to_resume_from
python
oumi-ai/oumi
src/oumi/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/train.py
Apache-2.0
def _create_training_dirs(config: TrainingConfig) -> None: """Creates misc directories referenced in config.""" _ensure_dir_exists(config.training.output_dir, "training.output_dir") telemetry_dir = config.training.telemetry_dir if telemetry_dir: _ensure_dir_exists(telemetry_dir, "training.teleme...
Creates misc directories referenced in config.
_create_training_dirs
python
oumi-ai/oumi
src/oumi/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/train.py
Apache-2.0
def _log_training_info(config: TrainingConfig) -> None: """Logs misc infos about training config/devices/etc. Writes to files.""" telemetry_dir = config.training.telemetry_dir if telemetry_dir and is_world_process_zero(): device_rank_info = get_device_rank_info() save_json( { ...
Logs misc infos about training config/devices/etc. Writes to files.
_log_training_info
python
oumi-ai/oumi
src/oumi/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/train.py
Apache-2.0
def _finalize_training_config(config: TrainingConfig) -> TrainingConfig: """Updates TrainingConfig using dynamic/runtime info.""" if config.training.dataloader_num_workers == "auto": # Resolve "auto" to an actual number. num_workers = estimate_dataloader_num_workers() logger.info( ...
Updates TrainingConfig using dynamic/runtime info.
_finalize_training_config
python
oumi-ai/oumi
src/oumi/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/train.py
Apache-2.0
def _verl_train( partial_trainer: Callable[[], BaseTrainer], checkpoint_location: Optional[str] ): """Runs verl training. This function initializes Ray, and then initializes and kicks off the trainer in a remote Ray function. """ try: import ray # pyright: ignore[reportMissingImports] ...
Runs verl training. This function initializes Ray, and then initializes and kicks off the trainer in a remote Ray function.
_verl_train
python
oumi-ai/oumi
src/oumi/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/train.py
Apache-2.0
def train( config: TrainingConfig, additional_model_kwargs: Optional[dict[str, Any]] = None, additional_trainer_kwargs: Optional[dict[str, Any]] = None, ) -> None: """Trains a model using the provided configuration.""" _START_TIME = time.time() _create_training_dirs(config) _log_training_in...
Trains a model using the provided configuration.
train
python
oumi-ai/oumi
src/oumi/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/train.py
Apache-2.0
def infer_interactive( config: InferenceConfig, *, input_image_bytes: list[bytes] | None = None, system_prompt: str | None = None, ) -> None: """Interactively provide the model response for a user-provided input.""" import oumi.infer return oumi.infer.infer_interactive( config, inpu...
Interactively provide the model response for a user-provided input.
infer_interactive
python
oumi-ai/oumi
src/oumi/__init__.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/__init__.py
Apache-2.0
def infer( config: InferenceConfig, inputs: list[str] | None = None, inference_engine: BaseInferenceEngine | None = None, *, input_image_bytes: list[bytes] | None = None, ) -> list[Conversation]: """Runs batch inference for a model using the provided configuration. Args: config: The...
Runs batch inference for a model using the provided configuration. Args: config: The configuration to use for inference. inputs: A list of inputs for inference. inference_engine: The engine to use for inference. If unspecified, the engine will be inferred from `config`. ...
infer
python
oumi-ai/oumi
src/oumi/__init__.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/__init__.py
Apache-2.0
def judge_conversations( config: JudgeConfig, judge_inputs: list[Conversation] ) -> list[dict[str, Any]]: """Judge a list of conversations. This function evaluates a list of conversations using the specified Judge. The function performs the following steps: 1. Initializes the Judge with the p...
Judge a list of conversations. This function evaluates a list of conversations using the specified Judge. The function performs the following steps: 1. Initializes the Judge with the provided configuration. 2. Uses the Judge to evaluate each conversation input. 3. Collects and returns...
judge_conversations
python
oumi-ai/oumi
src/oumi/__init__.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/__init__.py
Apache-2.0
def judge_dataset(config: JudgeConfig, dataset: BaseSftDataset) -> list[dict[str, Any]]: """Judge a dataset. This function evaluates a given dataset using a specified Judge configuration. The function performs the following steps: 1. Initializes the Judge with the provided configuration. ...
Judge a dataset. This function evaluates a given dataset using a specified Judge configuration. The function performs the following steps: 1. Initializes the Judge with the provided configuration. 2. Iterates through the dataset to extract conversation inputs. 3. Uses the Judge to eva...
judge_dataset
python
oumi-ai/oumi
src/oumi/__init__.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/__init__.py
Apache-2.0
def train( config: TrainingConfig, additional_model_kwargs: dict[str, Any] | None = None, additional_trainer_kwargs: dict[str, Any] | None = None, ) -> None: """Trains a model using the provided configuration.""" import oumi.train return oumi.train.train( config, additional_mode...
Trains a model using the provided configuration.
train
python
oumi-ai/oumi
src/oumi/__init__.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/__init__.py
Apache-2.0
def build_training_callbacks( config: TrainingConfig, model: torch.nn.Module, profiler: Optional[Any] ) -> list[BaseTrainerCallback]: """Builds the training callbacks for the given training config and model. This function creates a list of callback objects to be used during training. It includes callba...
Builds the training callbacks for the given training config and model. This function creates a list of callback objects to be used during training. It includes callbacks for performance metrics, profiling, telemetry, and Model Flops Utilization (MFU) logging based on the provided configuration. Args: ...
build_training_callbacks
python
oumi-ai/oumi
src/oumi/builders/callbacks.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/callbacks.py
Apache-2.0
def build_data_collator( collator_name: str, tokenizer: BaseTokenizer, *, max_length: Optional[int], label_ignore_index: Optional[int] = constants.LABEL_IGNORE_INDEX, debug: bool = False, **kwargs, ) -> Callable: """Builds a data collator based on the given collator name. Args: ...
Builds a data collator based on the given collator name. Args: collator_name: The name of the collator to build. Supported values are: - "text_with_padding": Uses `TextCollatorWithPadding`. - "text_completions_only_with_padding": Uses `TextCompletionsCol...
build_data_collator
python
oumi-ai/oumi
src/oumi/builders/collators.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/collators.py
Apache-2.0
def build_collator_from_config( config: TrainingConfig, tokenizer: Optional[BaseTokenizer], debug: bool = False ) -> Optional[Callable]: """Creates data collator if specified in config.""" train_split = config.data.get_split(DatasetSplit.TRAIN) if not train_split.collator_name: return None c...
Creates data collator if specified in config.
build_collator_from_config
python
oumi-ai/oumi
src/oumi/builders/collators.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/collators.py
Apache-2.0
def build_dataset_mixture( data_params: DataParams, tokenizer: Optional[BaseTokenizer], dataset_split: DatasetSplit, seq_length: Optional[int] = None, seed: Optional[int] = None, ) -> Union[DatasetType, PretrainingAsyncTextDataset]: """Builds a dataset for the specified split. Args: ...
Builds a dataset for the specified split. Args: data_params: The data params. tokenizer: The tokenizer object to use for preprocessing. dataset_split: The split of the dataset to load. seq_length: The length each example will be packed to. This is only used if packing is...
build_dataset_mixture
python
oumi-ai/oumi
src/oumi/builders/data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/data.py
Apache-2.0
def build_dataset( dataset_name: str, tokenizer: Optional[BaseTokenizer], seed: Optional[int] = None, stream: bool = False, pack: bool = False, use_torchdata: Optional[bool] = None, **kwargs, ) -> Union[DatasetType, PretrainingAsyncTextDataset]: """Builds a dataset from a dataset name. ...
Builds a dataset from a dataset name. Please refer to `DatasetParams` & `DatasetSplitParams` for a description of the all the arguments.
build_dataset
python
oumi-ai/oumi
src/oumi/builders/data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/data.py
Apache-2.0
def _mix_datasets( dataset_list: list[DatasetType], mixture_proportions: Sequence[Optional[float]], mixture_strategy: str, seed: Optional[int], ) -> DatasetType: """Joins multiple datasets using the provided `mixture_strategy`.""" if any([proportion is None for proportion in mixture_proportions]...
Joins multiple datasets using the provided `mixture_strategy`.
_mix_datasets
python
oumi-ai/oumi
src/oumi/builders/data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/data.py
Apache-2.0
def _build_iterable_dataset_sampler( dataset: datasets.IterableDataset, n: int ) -> Callable: """Returns a generator that supports oversampling an IterableDataset.""" def _generator(): generation_count = 0 while generation_count < n: for generation in dataset: ge...
Returns a generator that supports oversampling an IterableDataset.
_build_iterable_dataset_sampler
python
oumi-ai/oumi
src/oumi/builders/data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/data.py
Apache-2.0
def _load_dataset( dataset_params: DatasetParams, stream: bool, tokenizer: Optional[BaseTokenizer] = None, ) -> Union[ datasets.DatasetDict, datasets.Dataset, datasets.IterableDatasetDict, datasets.IterableDataset, ]: """Loads a dataset with the specified name and subset. Note: ...
Loads a dataset with the specified name and subset. Note: For custom map datasets, streaming is only partially supported: - The full dataset is downloaded (or loaded from disk), and loaded in memory. - However, transformations are applied lazily in streaming mode. The raw datas...
_load_dataset
python
oumi-ai/oumi
src/oumi/builders/data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/data.py
Apache-2.0
def _is_mixture_packed(dataset_split_params: DatasetSplitParams) -> bool: """Returns whether all datasets in the mixture are packed. Raises: ValueError: If a mixture of packed and unpacked datasets is detected. """ num_packed = 0 for dataset in dataset_split_params.datasets: dataset...
Returns whether all datasets in the mixture are packed. Raises: ValueError: If a mixture of packed and unpacked datasets is detected.
_is_mixture_packed
python
oumi-ai/oumi
src/oumi/builders/data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/data.py
Apache-2.0
def build_inference_engine( engine_type: InferenceEngineType, model_params: ModelParams, remote_params: Optional[RemoteParams] = None, generation_params: Optional[GenerationParams] = None, ) -> BaseInferenceEngine: """Returns the inference engine based on the provided config. Args: engi...
Returns the inference engine based on the provided config. Args: engine_type: Type of inference engine to create model_params: Model parameters remote_params: Remote configuration parameters (required for some engines) generation_params: Generation parameters Returns: A...
build_inference_engine
python
oumi-ai/oumi
src/oumi/builders/inference_engines.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/inference_engines.py
Apache-2.0
def build_lr_scheduler( optimizer: torch.optim.Optimizer, training_params: TrainingParams, num_training_steps: Optional[int] = None, current_epoch: int = 0, ) -> torch.optim.lr_scheduler.LRScheduler: """Builds a learning rate scheduler based on the provided training parameters. Args: op...
Builds a learning rate scheduler based on the provided training parameters. Args: optimizer: The optimizer for which to build the learning rate scheduler. training_params: The training parameters containing the scheduler args. num_training_steps: The total number of training steps ...
build_lr_scheduler
python
oumi-ai/oumi
src/oumi/builders/lr_schedules.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/lr_schedules.py
Apache-2.0
def build_model( model_params: ModelParams, peft_params: Optional[PeftParams] = None, **kwargs, ) -> nn.Module: """Builds and returns a model based on the provided Oumi configuration. Args: model_params: The model parameters. peft_params: The PEFT parameters. kwargs (dict, o...
Builds and returns a model based on the provided Oumi configuration. Args: model_params: The model parameters. peft_params: The PEFT parameters. kwargs (dict, optional): Additional keyword arguments for model loading. Returns: model: The built model.
build_model
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def _patch_model_for_liger_kernel(model: nn.Module) -> None: """Patches the model for Liger Kernel. The list of support models can be found here: https://github.com/linkedin/Liger-Kernel/blob/99599091373f178e8ad6a69ecb1b32351d1d5c1f/src/liger_kernel/transformers/monkey_patch.py#L700 If the model is no...
Patches the model for Liger Kernel. The list of supported models can be found here: https://github.com/linkedin/Liger-Kernel/blob/99599091373f178e8ad6a69ecb1b32351d1d5c1f/src/liger_kernel/transformers/monkey_patch.py#L700 If the model is not supported, liger kernel patching will not be applied, and a wa...
_patch_model_for_liger_kernel
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_oumi_model( model_params: ModelParams, peft_params: Optional[PeftParams] = None, **kwargs, ) -> nn.Module: """Builds a custom model from our Oumi registry.""" model_class = REGISTRY[model_params.model_name, RegistryType.MODEL] model = model_class(**model_params.model_kwargs) if mo...
Builds a custom model from our Oumi registry.
build_oumi_model
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_huggingface_model( model_params: ModelParams, peft_params: Optional[PeftParams] = None, **kwargs, ) -> nn.Module: """Builds a HuggingFace model. If a local directory is specified, the model will be loaded from that checkpoint. Otherwise, `model_params.model_name` is the name of a Hugg...
Builds a HuggingFace model. If a local directory is specified, the model will be loaded from that checkpoint. Otherwise, `model_params.model_name` is the name of a HuggingFaceHub model. The model will be downloaded from the Hub to a local cache directory if it is not already present, and will be loaded...
build_huggingface_model
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def is_image_text_llm_using_model_name( model_name: str, trust_remote_code: bool ) -> bool: """Determines whether the model is a basic image+text LLM.""" model_config = find_internal_model_config_using_model_name( model_name, trust_remote_code=trust_remote_code ) return model_config is not N...
Determines whether the model is a basic image+text LLM.
is_image_text_llm_using_model_name
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def is_image_text_llm(model_params: ModelParams) -> bool:
    """Determines whether the model is a basic image+text LLM."""
    # Thin wrapper: forward the relevant fields to the name-based check.
    result = is_image_text_llm_using_model_name(
        model_params.model_name,
        trust_remote_code=model_params.trust_remote_code,
    )
    return result
Determines whether the model is a basic image+text LLM.
is_image_text_llm
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_cambrian_model( model_params: ModelParams, peft_params: Optional[PeftParams] = None, **kwargs, ) -> nn.Module: """Downloads and builds the model from the HuggingFace Hub.""" from importlib.util import find_spec for dependency_name in ("diffusers", "einops", "open_clip", "timm"): ...
Downloads and builds the model from the HuggingFace Hub.
build_cambrian_model
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_tokenizer( model_params: ModelParams, ) -> Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]: """Builds and returns a tokenizer based on the provided Oumi configuration. Args: model_params (ModelParams): The model parameters. Returns: tokenizer: Th...
Builds and returns a tokenizer based on the provided Oumi configuration. Args: model_params (ModelParams): The model parameters. Returns: tokenizer: The tokenizer object built from the configuration.
build_tokenizer
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_peft_model( base_model, use_gradient_checkpointing: bool, peft_params: PeftParams ): """Builds a PEFT model based on the given base model and params. Args: base_model: The base model to build the PEFT model on. use_gradient_checkpointing: Enable/disable gradient checkpointing. ...
Builds a PEFT model based on the given base model and params. Args: base_model: The base model to build the PEFT model on. use_gradient_checkpointing: Enable/disable gradient checkpointing. peft_params: The desired params for LORA. Returns: The built PEFT model.
build_peft_model
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_chat_template(template_name: str) -> str: """Builds a chat template based on code name. Args: template_name: the code name describing the chat-template. Raises: FileNotFoundError: if the requested template file does not exist. Returns: str: a jinja-based chat-templat...
Builds a chat template based on code name. Args: template_name: the code name describing the chat-template. Raises: FileNotFoundError: if the requested template file does not exist. Returns: str: a jinja-based chat-template.
build_chat_template
python
oumi-ai/oumi
src/oumi/builders/models.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/models.py
Apache-2.0
def build_optimizer( model: torch.nn.Module, config: TrainingParams ) -> torch.optim.Optimizer: """Builds and returns a PyTorch optimizer based on the provided configuration. See pytorch documentation for more information on available optimizers: https://pytorch.org/docs/stable/optim.html Args: ...
Builds and returns a PyTorch optimizer based on the provided configuration. See pytorch documentation for more information on available optimizers: https://pytorch.org/docs/stable/optim.html Args: model: The model whose parameters will be optimized. config: The configuration object contain...
build_optimizer
python
oumi-ai/oumi
src/oumi/builders/optimizers.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/optimizers.py
Apache-2.0
def build_dataset_mixture( data_params: DataParams, tokenizer: Optional[BaseTokenizer], dataset_split: DatasetSplit, seed: Optional[int] = None, ) -> IterDataPipe: """Builds a dataset for the specified split. Args: data_params: The data params. tokenizer: The tokenizer object to...
Builds a dataset for the specified split. Args: data_params: The data params. tokenizer: The tokenizer object to use for preprocessing. dataset_split: The split of the dataset to load. seed: If specified, a seed used for random sampling. Returns: dataset: The built data...
build_dataset_mixture
python
oumi-ai/oumi
src/oumi/builders/oumi_data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/oumi_data.py
Apache-2.0
def _load_dataset( dataset_params: DatasetParams, stream: bool, tokenizer: Optional[BaseTokenizer] = None, ) -> IterDataPipe: """Loads a dataset and wraps it in a DataPipe if necessary.""" # First, try to load a custom dataset from the REGISTRY dataset_class = REGISTRY.get_dataset( datas...
Loads a dataset and wraps it in a DataPipe if necessary.
_load_dataset
python
oumi-ai/oumi
src/oumi/builders/oumi_data.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/oumi_data.py
Apache-2.0
def build_processor( processor_name: str, tokenizer: BaseTokenizer, *, processor_kwargs: Optional[dict[str, Any]] = None, trust_remote_code: bool = False, ) -> BaseProcessor: """Builds a processor. Args: processor_name: A name of the processor (usually, equals to a model name). ...
Builds a processor. Args: processor_name: A name of the processor (usually, equals to a model name). tokenizer: A tokenizer to use with the processor. processor_kwargs: A dictionary of processor-specific parameters. These parameters are passed to the processor constructor. ...
build_processor
python
oumi-ai/oumi
src/oumi/builders/processors.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/processors.py
Apache-2.0
def build_trainer( trainer_type: TrainerType, processor: Optional[BaseProcessor] ) -> Callable[..., BaseTrainer]: """Builds a trainer creator functor based on the provided configuration. Args: trainer_type (TrainerType): Enum indicating the type of training. processor: An optional processor...
Builds a trainer creator functor based on the provided configuration. Args: trainer_type (TrainerType): Enum indicating the type of training. processor: An optional processor. Returns: A builder function that can create an appropriate trainer based on the trainer type specified...
build_trainer
python
oumi-ai/oumi
src/oumi/builders/training.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/builders/training.py
Apache-2.0
def try_get_config_name_for_alias( alias: str, alias_type: AliasType, ) -> str: """Gets the config path for a given alias. This function resolves the config path for a given alias and alias type. If the alias is not found, the original alias is returned. Args: alias (str): The alias to...
Gets the config path for a given alias. This function resolves the config path for a given alias and alias type. If the alias is not found, the original alias is returned. Args: alias (str): The alias to resolve. alias_type (AliasType): The type of config to resolve. Returns: ...
try_get_config_name_for_alias
python
oumi-ai/oumi
src/oumi/cli/alias.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/alias.py
Apache-2.0
def parse_extra_cli_args(ctx: typer.Context) -> list[str]: """Parses extra CLI arguments into a list of strings. Args: ctx: The Typer context object. Returns: List[str]: The extra CLI arguments """ args = [] # The following formats are supported: # 1. Space separated: "--f...
Parses extra CLI arguments into a list of strings. Args: ctx: The Typer context object. Returns: List[str]: The extra CLI arguments
parse_extra_cli_args
python
oumi-ai/oumi
src/oumi/cli/cli_utils.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/cli_utils.py
Apache-2.0
def configure_common_env_vars() -> None: """Sets common environment variables if needed.""" if "ACCELERATE_LOG_LEVEL" not in os.environ: os.environ["ACCELERATE_LOG_LEVEL"] = "info" if "TOKENIZERS_PARALLELISM" not in os.environ: os.environ["TOKENIZERS_PARALLELISM"] = "false"
Sets common environment variables if needed.
configure_common_env_vars
python
oumi-ai/oumi
src/oumi/cli/cli_utils.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/cli_utils.py
Apache-2.0
def set_log_level(level: Optional[LogLevel]): """Sets the logging level for the current command. Args: level (Optional[LogLevel]): The log level to use. """ if not level: return uppercase_level = level.upper() logger.setLevel(uppercase_level) CONSOLE.print(f"Set log level to...
Sets the logging level for the current command. Args: level (Optional[LogLevel]): The log level to use.
set_log_level
python
oumi-ai/oumi
src/oumi/cli/cli_utils.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/cli_utils.py
Apache-2.0
def _resolve_oumi_prefix( config_path: str, output_dir: Optional[Path] = None ) -> tuple[str, Path]: """Resolves oumi:// prefix and determines output directory. Args: config_path: Path that may contain oumi:// prefix output_dir: Optional output directory override Returns: tuple...
Resolves oumi:// prefix and determines output directory. Args: config_path: Path that may contain oumi:// prefix output_dir: Optional output directory override Returns: tuple[str, Path]: (cleaned path, output directory)
_resolve_oumi_prefix
python
oumi-ai/oumi
src/oumi/cli/cli_utils.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/cli_utils.py
Apache-2.0
def resolve_and_fetch_config( config_path: str, output_dir: Optional[Path] = None, force: bool = True ) -> Path: """Resolve oumi:// prefix and fetch config if needed. Args: config_path: Original config path that may contain oumi:// prefix output_dir: Optional override for output directory ...
Resolve oumi:// prefix and fetch config if needed. Args: config_path: Original config path that may contain oumi:// prefix output_dir: Optional override for output directory force: Whether to overwrite an existing config Returns: Path: Local path to the config file
resolve_and_fetch_config
python
oumi-ai/oumi
src/oumi/cli/cli_utils.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/cli_utils.py
Apache-2.0
def __init__( self, node_rank: int, world_info: _WorldInfo, master_address: str, master_port: int, node_ips: list[str], ): """Initializes run info, and validates arguments.""" if not (world_info.num_nodes > 0 and world_info.gpus_per_node > 0): ...
Initializes run info, and validates arguments.
__init__
python
oumi-ai/oumi
src/oumi/cli/distributed_run.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/distributed_run.py
Apache-2.0
def __repr__(self) -> str: """Defines how this class is properly printed.""" fields_dict: dict[str, Any] = { "node_rank": self.node_rank, "num_nodes": self.num_nodes, "gpus_per_node": self.gpus_per_node, "total_gpus": self.total_gpus, "master_a...
Defines how this class is properly printed.
__repr__
python
oumi-ai/oumi
src/oumi/cli/distributed_run.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/distributed_run.py
Apache-2.0
def torchrun( ctx: typer.Context, level: cli_utils.LOG_LEVEL_TYPE = None, ) -> None: """Starts `torchrun` sub-process w/ automatically configured common params. Args: ctx: The Typer context object. level: The logging level for the specified command. """ try: run_info: _P...
Starts `torchrun` sub-process w/ automatically configured common params. Args: ctx: The Typer context object. level: The logging level for the specified command.
torchrun
python
oumi-ai/oumi
src/oumi/cli/distributed_run.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/distributed_run.py
Apache-2.0
def accelerate( ctx: typer.Context, level: cli_utils.LOG_LEVEL_TYPE = None, ) -> None: """Starts `accelerate` sub-process w/ automatically configured common params. Args: ctx: The Typer context object. level: The logging level for the specified command. """ try: run_info...
Starts `accelerate` sub-process w/ automatically configured common params. Args: ctx: The Typer context object. level: The logging level for the specified command.
accelerate
python
oumi-ai/oumi
src/oumi/cli/distributed_run.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/distributed_run.py
Apache-2.0
def _detect_process_run_info(env: dict[str, str]) -> _ProcessRunInfo: """Detects process run info. Uses known environment variables to detect common runtime parameters. Args: env: All environment variables. Returns: Process run info. Raises: ValueError: If any of the requ...
Detects process run info. Uses known environment variables to detect common runtime parameters. Args: env: All environment variables. Returns: Process run info. Raises: ValueError: If any of the required environment variables are missing or invalid. RuntimeError: If t...
_detect_process_run_info
python
oumi-ai/oumi
src/oumi/cli/distributed_run.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/distributed_run.py
Apache-2.0
def _get_package_version(package_name: str, version_fallback: str) -> str: """Gets the version of the specified package. Args: package_name: The name of the package. version_fallback: The fallback version string. Returns: str: The version of the package, or a fallback string if the...
Gets the version of the specified package. Args: package_name: The name of the package. version_fallback: The fallback version string. Returns: str: The version of the package, or a fallback string if the package is not installed.
_get_package_version
python
oumi-ai/oumi
src/oumi/cli/env.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/env.py
Apache-2.0
def env(): """Prints information about the current environment.""" # Delayed imports from oumi.utils.torch_utils import format_cudnn_version # End imports version_fallback = "<not installed>" env_var_fallback = "<not set>" # All relevant environment vars. env_vars = sorted( [ ...
Prints information about the current environment.
env
python
oumi-ai/oumi
src/oumi/cli/env.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/env.py
Apache-2.0
def fetch( config_path: Annotated[ str, typer.Argument( help="Path to config " "(e.g. oumi://configs/recipes/smollm/inference/135m_infer.yaml)" ), ], output_dir: Annotated[ Optional[Path], typer.Option( "--output-dir", "...
Fetch configuration files from GitHub repository.
fetch
python
oumi-ai/oumi
src/oumi/cli/fetch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/fetch.py
Apache-2.0
def infer( ctx: typer.Context, config: Annotated[ str, typer.Option( *cli_utils.CONFIG_FLAGS, help="Path to the configuration file for inference.", ), ], interactive: Annotated[ bool, typer.Option("-i", "--interactive", help="Run in an inte...
Run inference on a model. If `input_filepath` is provided in the configuration file, inference will run on those input examples. Otherwise, inference will run interactively with user-provided inputs. Args: ctx: The Typer context object. config: Path to the configuration file for infere...
infer
python
oumi-ai/oumi
src/oumi/cli/infer.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/infer.py
Apache-2.0
def model( ctx: typer.Context, config: Annotated[ str, typer.Option(*cli_utils.CONFIG_FLAGS, help="Path to the judge config file") ], inference_config: Annotated[ str, typer.Option(help="Path to the inference config file"), ], input_file: Annotated[ Optional[str],...
Judge the outputs of a model on a dataset.
model
python
oumi-ai/oumi
src/oumi/cli/judge.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/judge.py
Apache-2.0
def _get_working_dir(current: str) -> str: """Prompts the user to select the working directory, if relevant.""" if not is_dev_build(): return current oumi_root = get_git_root_dir() if not oumi_root or oumi_root == Path(current).resolve(): return current use_root = typer.confirm( ...
Prompts the user to select the working directory, if relevant.
_get_working_dir
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def _print_and_wait( message: str, task: Callable[..., bool], asynchronous=True, **kwargs ) -> None: """Prints a message with a loading spinner until the provided task is done.""" with cli_utils.CONSOLE.status(message): if asynchronous: with Pool(processes=1) as worker_pool: ...
Prints a message with a loading spinner until the provided task is done.
_print_and_wait
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def _is_job_done(id: str, cloud: str, cluster: str) -> bool: """Returns true IFF a job is no longer running.""" from oumi import launcher running_cloud = launcher.get_cloud(cloud) running_cluster = running_cloud.get_cluster(cluster) if not running_cluster: return True status = running_c...
Returns true IFF a job is no longer running.
_is_job_done
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def _cancel_worker(id: str, cloud: str, cluster: str) -> bool: """Cancels a job. All workers must return a boolean to indicate whether the task is done. Cancel has no intermediate states, so it always returns True. """ from oumi import launcher if not cluster: return True if not id...
Cancels a job. All workers must return a boolean to indicate whether the task is done. Cancel has no intermediate states, so it always returns True.
_cancel_worker
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def _down_worker(cluster: str, cloud: Optional[str]) -> bool: """Turns down a cluster. All workers must return a boolean to indicate whether the task is done. Down has no intermediate states, so it always returns True. """ from oumi import launcher if cloud: target_cloud = launcher.get...
Turns down a cluster. All workers must return a boolean to indicate whether the task is done. Down has no intermediate states, so it always returns True.
_down_worker
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def _stop_worker(cluster: str, cloud: Optional[str]) -> bool: """Stops a cluster. All workers must return a boolean to indicate whether the task is done. Stop has no intermediate states, so it always returns True. """ from oumi import launcher if cloud: target_cloud = launcher.get_clou...
Stops a cluster. All workers must return a boolean to indicate whether the task is done. Stop has no intermediate states, so it always returns True.
_stop_worker
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def _poll_job( job_status: "JobStatus", detach: bool, cloud: str, running_cluster: Optional["BaseCluster"] = None, ) -> None: """Polls a job until it is complete. If the job is running in detached mode and the job is not on the local cloud, the function returns immediately. """ from...
Polls a job until it is complete. If the job is running in detached mode and the job is not on the local cloud, the function returns immediately.
_poll_job
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def cancel( cloud: Annotated[str, typer.Option(help="Filter results by this cloud.")], cluster: Annotated[ str, typer.Option(help="Filter results by clusters matching this name."), ], id: Annotated[ str, typer.Option(help="Filter results by jobs matching this job ID.") ], ...
Cancels a job. Args: cloud: Filter results by this cloud. cluster: Filter results by clusters matching this name. id: Filter results by jobs matching this job ID. level: The logging level for the specified command.
cancel
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def down( cluster: Annotated[str, typer.Option(help="The cluster to turn down.")], cloud: Annotated[ Optional[str], typer.Option( help="If specified, only clusters on this cloud will be affected." ), ] = None, level: cli_utils.LOG_LEVEL_TYPE = None, ) -> None: """...
Turns down a cluster. Args: cluster: The cluster to turn down. cloud: If specified, only clusters on this cloud will be affected. level: The logging level for the specified command.
down
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def run( ctx: typer.Context, config: Annotated[ str, typer.Option( *cli_utils.CONFIG_FLAGS, help="Path to the configuration file for the job." ), ], cluster: Annotated[ Optional[str], typer.Option( help=( "The cluster to use...
Runs a job on the target cluster. Args: ctx: The Typer context object. config: Path to the configuration file for the job. cluster: The cluster to use for this job. If no such cluster exists, a new cluster will be created. If unspecified, a new cluster will be created with ...
run
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def status( cloud: Annotated[ Optional[str], typer.Option(help="Filter results by this cloud.") ] = None, cluster: Annotated[ Optional[str], typer.Option(help="Filter results by clusters matching this name."), ] = None, id: Annotated[ Optional[str], typer.Option(help=...
Prints the status of jobs launched from Oumi. Optionally, the caller may specify a job id, cluster, or cloud to further filter results. Args: cloud: Filter results by this cloud. cluster: Filter results by clusters matching this name. id: Filter results by jobs matching this job ID...
status
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def stop( cluster: Annotated[str, typer.Option(help="The cluster to stop.")], cloud: Annotated[ Optional[str], typer.Option( help="If specified, only clusters on this cloud will be affected." ), ] = None, level: cli_utils.LOG_LEVEL_TYPE = None, ) -> None: """Stops...
Stops a cluster. Args: cluster: The cluster to stop. cloud: If specified, only clusters on this cloud will be affected. level: The logging level for the specified command.
stop
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def up( ctx: typer.Context, config: Annotated[ str, typer.Option( *cli_utils.CONFIG_FLAGS, help="Path to the configuration file for the job." ), ], cluster: Annotated[ Optional[str], typer.Option( help=( "The cluster to use ...
Launches a job. Args: ctx: The Typer context object. config: Path to the configuration file for the job. cluster: The cluster to use for this job. If no such cluster exists, a new cluster will be created. If unspecified, a new cluster will be created with a unique na...
up
python
oumi-ai/oumi
src/oumi/cli/launch.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/launch.py
Apache-2.0
def train( ctx: typer.Context, config: Annotated[ str, typer.Option( *cli_utils.CONFIG_FLAGS, help="Path to the configuration file for training." ), ], level: cli_utils.LOG_LEVEL_TYPE = None, ): """Train a model. Args: ctx: The Typer context object. ...
Train a model. Args: ctx: The Typer context object. config: Path to the configuration file for training. level: The logging level for the specified command.
train
python
oumi-ai/oumi
src/oumi/cli/train.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/cli/train.py
Apache-2.0
def safe_asyncio_run(main: Coroutine[Any, Any, T]) -> T: """Run an Awaitable in a new thread. Blocks until the thread is finished. This circumvents the issue of running async functions in the main thread when an event loop is already running (Jupyter notebooks, for example). Prefer using `safe_asyncio...
Run an Awaitable in a new thread. Blocks until the thread is finished. This circumvents the issue of running async functions in the main thread when an event loop is already running (Jupyter notebooks, for example). Prefer using `safe_asyncio_run` over `asyncio.run` to allow upstream callers to ignore...
safe_asyncio_run
python
oumi-ai/oumi
src/oumi/core/async_utils.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/async_utils.py
Apache-2.0
def _get_use_orig_params(config: TrainingConfig) -> bool: """Returns whether to use the PyTorch Module's original parameters for FSDP. If the user specified a value, return that. Else, infer its value based on other config values (compilation, FSDP, PEFT). """ if config.fsdp.use_orig_params is not ...
Returns whether to use the PyTorch Module's original parameters for FSDP. If the user specified a value, return that. Else, infer its value based on other config values (compilation, FSDP, PEFT).
_get_use_orig_params
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def _parse_rank(rank: Optional[str]) -> int: """Parse the rank from the environment variable.""" if not rank: return 0 # -1 is a special value that means "not set". # It's used by the Accelerate launcher. # Defaulting to 0. if rank.strip() == "-1": return 0 if not rank.isdi...
Parse the rank from the environment variable.
_parse_rank
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def get_device_rank_info() -> DeviceRankInfo: """Returns device rank and world size.""" world_size = int(os.environ.get("WORLD_SIZE", 1)) if world_size <= 0: raise ValueError(f"WORLD_SIZE must be positive. Actual: {world_size}.") rank = _parse_rank(os.environ.get("RANK")) if rank < 0 or rank...
Returns device rank and world size.
get_device_rank_info
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def verify_torch_distributed_initialized_if_needed() -> None: """Checks if torch.dist is initialized if WORLD_SIZE> 1.""" device_rank_info: DeviceRankInfo = get_device_rank_info() world_size = device_rank_info.world_size if world_size > 1 and not ( torch.distributed.is_available() and torch.dist...
Checks if torch.dist is initialized if WORLD_SIZE> 1.
verify_torch_distributed_initialized_if_needed
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def is_world_process_zero() -> bool: """Whether or not this process is the global main process. When training in a distributed fashion on several machines this is only going to be `True` for one process. """ device_rank_info: DeviceRankInfo = get_device_rank_info() return device_rank_info.rank ...
Whether or not this process is the global main process. When training in a distributed fashion on several machines this is only going to be `True` for one process.
is_world_process_zero
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def is_local_process_zero() -> bool: """Whether or not this process is the local main process. When training in a distributed fashion on several machines this is only going to be `True` for one process per node. """ device_rank_info: DeviceRankInfo = get_device_rank_info() return device_rank_in...
Whether or not this process is the local main process. When training in a distributed fashion on several machines this is only going to be `True` for one process per node.
is_local_process_zero
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def is_distributed() -> bool: """Whether or not the training is distributed. Returns: bool: True if the training is distributed, False otherwise. """ device_rank_info: DeviceRankInfo = get_device_rank_info() return device_rank_info.world_size > 1
Whether or not the training is distributed. Returns: bool: True if the training is distributed, False otherwise.
is_distributed
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def barrier( group: Optional[torch.distributed.ProcessGroup] = None, monitored: bool = False ) -> None: """Barrier synchronization among all processes in the group.""" if torch.distributed.is_available() and torch.distributed.is_initialized(): if monitored: torch.distributed.monitored_ba...
Barrier synchronization among all processes in the group.
barrier
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def all_gather_object( obj: T, group: Optional[torch.distributed.ProcessGroup] = None ) -> list[T]: """Gathers picklable objects from the whole group into a list.""" verify_torch_distributed_initialized_if_needed() if is_distributed(): device_rank_info: DeviceRankInfo = get_device_rank_info() ...
Gathers picklable objects from the whole group into a list.
all_gather_object
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def local_leader_only(*barrier_args, **barrier_kwargs): """Decorator for local leaders only operations.""" def decorator(user_function): @functools.wraps(user_function) def wrapper(*args, **kwargs): if is_local_process_zero(): # Execute the user function ...
Decorator for local leaders only operations.
local_leader_only
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def local_leader_first(*args, **kwargs): """Context manager for local leader first operations.""" if is_local_process_zero(): yield barrier(*args, **kwargs) else: barrier(*args, **kwargs) yield
Context manager for local leader first operations.
local_leader_first
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def global_leader_only(*args, **kwargs): """Decorator for global leader only operations.""" def decorator(user_function): @functools.wraps(user_function) def wrapper(*user_fn_args, **user_fn_kwargs): if is_world_process_zero(): # Execute the user function ...
Decorator for global leader only operations.
global_leader_only
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0
def global_leader_first(*args, **kwargs): """Context manager for global leader first operations.""" if is_world_process_zero(): yield barrier(*args, **kwargs) else: barrier(*args, **kwargs) yield
Context manager for global leader first operations.
global_leader_first
python
oumi-ai/oumi
src/oumi/core/distributed.py
https://github.com/oumi-ai/oumi/blob/master/src/oumi/core/distributed.py
Apache-2.0