code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def decode(self, input, *args, **kwargs) -> Union[str, List[str]]:
    """
    Perform decoding process of the tokenizer.

    Parameters
    ------------
    input : Union[List[int], List[List[int]], torch.Tensor]
        The token sequence (1-D) or a batch of token sequences (2-D).
    args : Optional.
        Positional arguments forwarded to the backend tokenizer.
    kwargs : Optional.
        Keyword arguments forwarded to the backend tokenizer.

    Returns
    ------------
    outputs : Union[str, List[str]]
        The text decoded from the token inputs: a single string for a 1-D
        input, or a list of strings for a 2-D (batched) input.
    """
    # NOTE: `input` shadows the builtin, but the name is part of the public
    # signature and is kept for backward compatibility.
    if isinstance(input, list):
        input = torch.tensor(input)
    if input.dim() == 2:
        # A 2-D tensor is a batch of sequences: decode each row.
        return self.tokenizer.batch_decode(input, *args, **kwargs)
    else:
        # Can be a 1-D list of ints or a 1-D Tensor.
        return self.tokenizer.decode(input, *args, **kwargs)
Perform decoding process of the tokenizer.
Parameters
------------
inputs : list.
The token sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The text decoded from the token inputs.
| decode | python | OptimalScale/LMFlow | src/lmflow/models/hf_encoder_decoder_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_encoder_decoder_model.py | Apache-2.0 |
def inference(self, inputs, *args, **kwargs):
    """
    Run the generation process of the model.

    Parameters
    ------------
    inputs :
        The sequence used as a prompt for the generation or as model inputs
        to the model.
    args : Optional.
        Positional arguments.
    kwargs : Optional.
        Keyword arguments.

    Returns
    ------------
    outputs :
        The generated sequence output.
    """
    # TODO need to discuss how to handle pad_token_id
    if self.arch_type == "encoder_decoder":
        kwargs.update(pad_token_id=self.tokenizer.pad_token_id)
    elif self.arch_type == "vision_encoder_decoder":
        # TODO discuss how to modify the interface to remove this part.
        # The auxiliary (image) inputs ride along as extra generate() kwargs
        # while the token ids become the positional input.
        inputs = copy.deepcopy(inputs)
        input_ids = inputs.pop('input_ids')
        kwargs.update(**inputs)
        inputs = input_ids

    with torch.no_grad():
        # Resolve which generate() implementation to call, then invoke it
        # once with identical arguments for every supported device.
        if self.device == "gpu":
            if getattr(self, "ds_engine", None) is not None:
                generate_fn = self.ds_engine.module.generate
            else:
                generate_fn = self.backend_model.generate
        elif self.device == "cpu":
            generate_fn = self.backend_model.generate
        else:
            raise NotImplementedError(
                f"device \"{self.device}\" is not supported"
            )
        outputs = generate_fn(
            input_ids=inputs,
            synced_gpus=True,
            *args,
            **kwargs,
        )
    return outputs
Perform generation process of the model.
Parameters
------------
inputs :
The sequence used as a prompt for the generation or as model inputs to the model.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
| inference | python | OptimalScale/LMFlow | src/lmflow/models/hf_encoder_decoder_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_encoder_decoder_model.py | Apache-2.0 |
def save(self, dir, save_full_model=False, *args, **kwargs):
    """
    Save the model and its tokenizer to the given directory.

    Parameters
    ------------
    dir :
        The directory to save model and tokenizer.
    save_full_model : Optional.
        Whether to save the full model; only takes effect together with
        LoRA (``self.model_args.use_lora``), in which case
        ``backend_model_full`` is saved instead of the backend model.
    kwargs : Optional.
        Keyword arguments.

    Returns
    ------------
    None
    """
    # The tokenizer is always saved alongside the weights.
    self.get_tokenizer().save_pretrained(dir)
    if save_full_model and self.model_args.use_lora:
        # presumably the full (non-adapter) model kept during LoRA training
        # — confirm against where `backend_model_full` is assigned.
        self.backend_model_full.save_pretrained(dir)
    else:
        self.get_backend_model().save_pretrained(dir)
Perform generation process of the model.
Parameters
------------
dir :
The directory to save model and tokenizer
save_full_model : Optional.
Whether to save full model.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
| save | python | OptimalScale/LMFlow | src/lmflow/models/hf_encoder_decoder_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_encoder_decoder_model.py | Apache-2.0 |
def get_max_length(self):
    """
    Return max acceptable input length in terms of tokens.
    """
    # Multi-modality processors wrap the text tokenizer in an inner
    # `tokenizer` attribute; the real max length lives there.
    if "tokenizer" in self.tokenizer.__dict__:
        return self.tokenizer.tokenizer.model_max_length
    return self.tokenizer.model_max_length
Return max acceptable input length in terms of tokens.
| get_max_length | python | OptimalScale/LMFlow | src/lmflow/models/hf_encoder_decoder_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_encoder_decoder_model.py | Apache-2.0 |
def __init__(
    self,
    model_args: ModelArguments,
    do_train: bool,
    ds_config=None,
    device: Optional[str]="gpu",
    use_accelerator: bool=False,
    hf_auto_model_additional_args: Optional[Dict]=None,
    *args,
    **kwargs
):
    """Initializes a HFModel instance.

    Parameters
    ----------
    model_args :
        Dictionary with model arguments such as model name, path, revision, etc.
    do_train : bool
        To prepare the model for training or inference.
    ds_config : optional
        Deepspeed configuration for distributed training, by default None
    device : str, optional
        By default "gpu"
    use_accelerator : bool, optional
        By default False
    hf_auto_model_additional_args : Dict, optional
        Extra kwargs forwarded to the HF model-config preparation (e.g.
        ``num_labels`` for sequence classification), by default None
    """
    # See more about loading any type of standard or custom dataset (from
    # files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training: The .from_pretrained methods guarantee that
    # only one local process can concurrently download model & vocab.
    self.device = device
    self.model_args = model_args
    # Map the declared architecture type to its HF Auto* model class.
    self.hf_auto_model = HF_AUTOMODEL_MAPPING[model_args.arch_type]
    self.use_accelerator = use_accelerator
    self.ds_config = ds_config
    self.do_train = do_train
    # Preparation steps run in sequence; later helpers may rely on
    # attributes set by earlier ones.
    self.tokenizer = self.__prepare_tokenizer(model_args)
    self.torch_dtype = self.__prepare_dtype(model_args)
    self.hf_model_config = self.__prepare_model_config(model_args, hf_auto_model_additional_args)
    self.quant_config = self.__prepare_quant_config(model_args)
    self.peft_config = self.__prepare_peft_config(model_args)
    self._activated = False  # for inference load and offload
    # Some implementations require custom modules to be injected into the model.
    self.__model_module_inject(model_args)
    if self.do_train:
        self.__prepare_model_for_training(model_args, self.hf_auto_model)
Parameters
----------
model_args :
Dictionary with model arguments such as model name, path, revision, etc.
do_train : bool
To prepare the model for training or inference.
ds_config : optional
Deepspeed configuration for distributed training, by default None
device : str, optional
By default "gpu"
use_accelerator : bool, optional
By default False
| __init__ | python | OptimalScale/LMFlow | src/lmflow/models/hf_model_mixin.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_model_mixin.py | Apache-2.0 |
def __prepare_model_config(
    self,
    model_args: ModelArguments,
    hf_auto_model_additional_args: Optional[Dict]=None,
):
    """Prepare model configuration for hf auto register.

    Parameters
    ----------
    model_args : ModelArguments
        LMFlow model arguments.
    hf_auto_model_additional_args : Optional[Dict], optional
        Special configurations such as `num_labels` in
        `AutoModelForSequenceClassification` (commonly used in reward
        modeling) are not preset here, so they should be passed in through
        this argument.

    Returns
    -------
    config : ModelConfig
        hf model config.
    """
    attn_impl = "flash_attention_2" if model_args.use_flash_attention else None
    config_kwargs = {
        "attn_implementation": attn_impl,
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "token": model_args.token,
        "trust_remote_code": model_args.trust_remote_code,
        "from_tf": bool(".ckpt" in model_args.model_name_or_path),
    }
    if hf_auto_model_additional_args is not None:
        config_kwargs.update(hf_auto_model_additional_args)

    # Prefer an explicit config name, then the model path; otherwise fall
    # back to a from-scratch config of the requested model type.
    pretrained_name = model_args.config_name or model_args.model_name_or_path
    if pretrained_name:
        config = AutoConfig.from_pretrained(pretrained_name, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    return config
Parameters
----------
model_args : ModelArguments
LMFlow model arguments.
hf_auto_model_additional_args : Optional[Dict], optional
Special configurations such as `num_labels` in `AutoModelForSequenceClassification`
(commonly used in reward modeling) will not preset in __prepare_model_config,
so it should be passed in hf_auto_model_additional_args.
Returns
-------
config : ModelConfig
hf model config.
| __prepare_model_config | python | OptimalScale/LMFlow | src/lmflow/models/hf_model_mixin.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_model_mixin.py | Apache-2.0 |
def __model_module_inject(
    self,
    model_args: ModelArguments,
) -> None:
    """Override some model modules with custom implementations.

    Current implementations:
    - Position interpolation (model_args.do_rope_scaling):
        replace llama embeddings with condense embeddings.
    """
    # position interpolation
    if model_args.do_rope_scaling:
        # BUGFIX: the config prepared in __init__ is stored as
        # `self.hf_model_config` — there is no `self.model_config`
        # attribute, so the previous lookup raised AttributeError whenever
        # rope scaling was enabled.
        if "LlamaForCausalLM" in self.hf_model_config.architectures:
            from lmflow.utils.position_interpolation.llama_rope_scaled_monkey_patch import (
                replace_llama_with_condense,
            )

            replace_llama_with_condense(model_args.rope_pi_ratio, model_args.rope_ntk_ratio)
Current implementations:
- Position interpolation (model_args.do_rope_scaling):
replace llama embeddings with condense embeddings.
| __model_module_inject | python | OptimalScale/LMFlow | src/lmflow/models/hf_model_mixin.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_model_mixin.py | Apache-2.0 |
def deactivate_model_for_inference(
    self,
    use_vllm: bool=False,
):
    """Deactivate the model and release the resources.

    NOTE: Currently, VLLM doesn't have an official way to do this, and the
    implementation below cannot release all gpu resources by our observation.
    Thus this method is just a placeholder for future implementation. See:
    [Github issue](https://github.com/vllm-project/vllm/issues/1908)
    """
    if not self._activated:
        logger.warning("You are trying to deactivate the model for inference, but it is already deactivated.")
        return

    if use_vllm:
        # Best-effort teardown of the vllm engine plus an explicit GC pass.
        destroy_model_parallel()
        del self.backend_model_for_inference.llm_engine.model_executor.driver_worker
        del self.backend_model_for_inference
        gc.collect()
        torch.cuda.empty_cache()
    else:
        # Offload to CPU; weights stay resident in host memory.
        self.backend_model.to("cpu")

    self._activated = False
NOTE: Currently, VLLM doesn't have an official way to do this, and the
implementation below cannot release all gpu resources by our observation.
Thus this method is just a placeholder for future implementation. See:
[Github issue](https://github.com/vllm-project/vllm/issues/1908)
| deactivate_model_for_inference | python | OptimalScale/LMFlow | src/lmflow/models/hf_model_mixin.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_model_mixin.py | Apache-2.0 |
def __init__(
    self,
    model_args: ModelArguments,
    tune_strategy: str='normal',
    ds_config=None,
    device="gpu",
    use_accelerator=False,
    *args,
    **kwargs
):
    """
    Initializes a HFTextRegressionModel instance.

    :param model_args: dictionary with model arguments such as model name, path, revision, etc.
    :param tune_strategy: tuning strategy: normal, none, lora or adapter
    :param ds_config: deepspeed configuration for distributed training
    """
    assert model_args.arch_type == "text_regression", (
        f"Invalid model architecture type: {model_args.arch_type}. "
        f"Expected: text_regression"
    )
    # Regression head produces a single scalar score per input.
    config_additional_args = {"num_labels": 1}
    HFModelMixin.__init__(
        self,
        model_args=model_args,
        do_train=(tune_strategy == "normal"),
        ds_config=ds_config,
        device=device,
        use_accelerator=use_accelerator,
        hf_auto_model_additional_args=config_additional_args,
        *args,
        **kwargs
    )
Initializes a HFTextRegressionModel instance.
:param model_args: dictionary with model arguments such as model name, path, revision, etc.
:param tune_strategy: tuning strategy: normal, none, lora or adapter
:param ds_config: deepspeed configuration for distributed training
| __init__ | python | OptimalScale/LMFlow | src/lmflow/models/hf_text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_text_regression_model.py | Apache-2.0 |
def tokenize(
    self,
    dataset: Dataset,
    add_special_tokens=True,
    *args,
    **kwargs
):
    """
    Tokenize the full dataset.

    Parameters
    ------------
    dataset : lmflow.datasets.Dataset.
        The dataset to tokenize; must use the "huggingface" backend.
    add_special_tokens : bool, optional
        Whether to add special tokens; only honored by the "text_only"
        dataset type — other types hard-code their own setting.
    args : Optional.
        Positional arguments.
    kwargs : Optional.
        Keyword arguments.

    Returns
    ------------
    tokenized_datasets :
        The tokenized dataset, without any leading or trailing special
        tokens (normally they are Begin-Of-Sentence or End-Of-Sentence
        tokens).

    Raises
    ------------
    NotImplementedError
        If the dataset backend is not "huggingface" or the dataset type
        is unsupported.
    """
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if dataset.get_backend() != "huggingface":
        raise NotImplementedError(
            "tokenization of datasets with non-huggingface backend are"
            "not supported yet"
        )

    dataset_type = dataset.get_type()
    model_args = self.model_args
    raw_datasets = dataset
    hf_raw_datasets = dataset.get_backend_dataset()
    column_names = list(hf_raw_datasets.features)  # in paired conversation, for example, would be 'chosen' and 'rejected'
    data_args = raw_datasets.get_data_args()

    # Whether to truncate long sequences to fit into max_length
    use_truncation = False
    if model_args.use_lora or data_args.disable_group_texts:
        use_truncation = True

    # Requires three types of information for tokenizing different datasets
    # 1) Which fields require tokenization, e.g.
    #    "text2float": "text", but not "float"
    #    "text2text": both "input" and "output"
    # 2) How will there tokenized sequence concatenated together, e.g.
    #    "text_only": "text" -> "text"
    #    "text2text": "input", "output" -> "input" + "output"
    # 3) Which fields require loss in final computation, e.g.
    #    "text_only": "text"
    #    "text2text": "output" only
    tokenize_fn = None
    tokenize_fn_kwargs = {
        "data_args": data_args,
        "tokenizer": self.tokenizer,
        "column_names": column_names,
    }
    if dataset_type == "text_only":
        tokenize_fn = tokenize_function
        text_only_tokenize_fn_kwargs = {
            "tokenized_column_order": ["text"],
            "label_columns": ["text"],
            "add_special_tokens": add_special_tokens,
            "use_truncation": use_truncation,
        }
        tokenize_fn_kwargs.update(text_only_tokenize_fn_kwargs)
    elif dataset_type == "text2text":
        tokenize_fn = tokenize_function
        text2text_tokenize_fn_kwargs = {
            "tokenized_column_order": ["input", "output"],
            "label_columns": ["output"],
            # NOTE: special tokens are disabled here regardless of the
            # `add_special_tokens` argument.
            "add_special_tokens": False,
            "use_truncation": use_truncation,
        }
        tokenize_fn_kwargs.update(text2text_tokenize_fn_kwargs)
    elif dataset_type in ["conversation", "paired_conversation"]:
        if dataset_type == "conversation":
            tokenize_fn = conversation_tokenize_function
        elif dataset_type == "paired_conversation":
            tokenize_fn = paired_conversation_tokenize_function

        if data_args.conversation_template:
            if data_args.conversation_template in PRESET_TEMPLATES.keys():
                conversation_template = PRESET_TEMPLATES[data_args.conversation_template]
            else:
                raise NotImplementedError(
                    f"Conversation template {data_args.conversation_template} is not supported yet."
                )
        else:
            logger.warning("No conversation template provided. Using default template.")
            conversation_template = PRESET_TEMPLATES['empty']

        tokenize_fn_kwargs["conversation_template"] = conversation_template
        logger.warning(f"Conversation template: {conversation_template}")
    elif dataset_type == "text_to_textlist":
        tokenize_fn = text_to_textlist_tokenize_function
        text_to_textlist_tokenize_fn_kwargs = {
            "add_special_tokens": False,
            "use_truncation": use_truncation,
        }
        tokenize_fn_kwargs.update(text_to_textlist_tokenize_fn_kwargs)
    else:
        raise NotImplementedError(
            f"Dataset type \"{dataset_type}\" is not supported, currently"
            " only support following data types for HFTextRegressionModel:\n"
            f" 1) [Inference]{TEXT_ONLY_DATASET_DESCRIPTION}\n"
            f" 2) [Inference]{TEXT2TEXT_DATASET_DESCRIPTION}\n"
            f" 3) [Training]{PAIRED_CONVERSATION_DATASET_DESCRIPTION}\n"
            f" 4) [Inference]{CONVERSATION_DATASET_DESCRIPTION}\n"
            f" 5) [Inference]{TEXT_TO_TEXTLIST_DATASET_DESCRIPTION}\n"
        )

    tokenize_kwargs = {}
    if not data_args.streaming:
        # Build a deterministic fingerprint so the datasets cache is
        # invalidated whenever the tokenizer or tokenization settings
        # change; the exact string composition below is load-bearing.
        fingerprint = hashlib.md5(
            (
                raw_datasets.get_fingerprint()
                + str(self.tokenizer)
                + f'###padding_side={self.tokenizer.padding_side}'
                + ('###conversation_template=' + str(conversation_template) if "conversation" in dataset_type else "")
                + f'###disable_group_texts={data_args.disable_group_texts}'
                + f'###block_size={data_args.block_size}'
            ).encode("utf-8")
        ).hexdigest()
        tokenize_kwargs = {
            "num_proc": data_args.preprocessing_num_workers,
            "load_from_cache_file": not data_args.overwrite_cache,
            "desc": "Running tokenizer on dataset",
            "new_fingerprint": fingerprint,
        }

    tokenized_datasets = raw_datasets.map(
        tokenize_fn,
        batched=True,
        remove_columns=column_names,
        fn_kwargs=tokenize_fn_kwargs,
        **tokenize_kwargs
    )
    return tokenized_datasets
Tokenize the full dataset.
Parameters
------------
dataset : lmflow.datasets.Dataset.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
tokenized_datasets :
The tokenized dataset, without any leading or trailing special
tokens (normally they are Begin-Of-Sentence or End-Of-Sentence
tokens).
| tokenize | python | OptimalScale/LMFlow | src/lmflow/models/hf_text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_text_regression_model.py | Apache-2.0 |
def inference(
    self,
    inputs,
    release_gpu: bool = False,
    use_vllm: bool = False,
    **kwargs
) -> Union[List[float], SequenceClassifierOutputWithPast]:
    """
    Run the inference process of the model.

    Parameters
    ------------
    inputs :
        The sequence used as a prompt for the generation or as model inputs
        to the model. When using vllm inference, this should be a string or
        a list of strings. When using normal inference, this should be a
        tensor.
    release_gpu : bool, optional
        Whether to release the GPU resource after inference, by default False.
    use_vllm : bool, optional
        Whether to use VLLM for inference, by default False.
    kwargs : Optional.
        Keyword arguments.

    Returns
    ------------
    outputs :
        The generated sequence output
    """
    if use_vllm:
        # Regression models have no vllm path; downgrade with a warning.
        logger.warning(
            "VLLM inference is not supported for text regression model, using normal inference instead."
        )
        use_vllm = False

    if not self._activated:
        self.activate_model_for_inference(
            use_vllm=use_vllm,
            **kwargs,
        )

    if use_vllm:
        res = self.__vllm_inference(inputs, **kwargs)
    else:
        res = self.__inference(inputs, **kwargs)

    if release_gpu:
        self.deactivate_model_for_inference(use_vllm=use_vllm)
    return res
Perform generation process of the model.
Parameters
------------
inputs :
The sequence used as a prompt for the generation or as model inputs to the model.
When using vllm inference, this should be a string or a list of strings.
When using normal inference, this should be a tensor.
release_gpu : bool, optional
Whether to release the GPU resource after inference, by default False.
use_vllm : bool, optional
Whether to use VLLM for inference, by default False.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
| inference | python | OptimalScale/LMFlow | src/lmflow/models/hf_text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_text_regression_model.py | Apache-2.0 |
def __inference(
    self,
    inputs,
    **kwargs
):
    """
    Run a forward pass of the model on already-tokenized inputs.

    Parameters
    ------------
    inputs :
        The **tokenized** sequence used as a prompt for the generation or as
        model inputs to the model.
    kwargs : Optional.
        Keyword arguments forwarded to the backend model call.

    Returns
    ------------
    outputs :
        The model output, or ``{"input": inputs, "output": outputs}`` when
        ``return_input=True`` is passed in kwargs.
    """
    with torch.no_grad():
        if self.use_accelerator:
            outputs = self.backend_model(
                input_ids=inputs,
                **kwargs,
            )
        else:
            if self.device == "gpu":
                # NOTE(review): assumes `self.ds_engine` was set up by the
                # activation path — confirm before relying on the gpu branch.
                outputs = self.ds_engine.module(
                    input_ids=inputs,
                    synced_gpus=True,
                    **kwargs,
                )
            elif self.device == "cpu":
                # NOTE(review): `synced_gpus` is forwarded to a plain model
                # __call__ here (not generate) — verify the backend accepts it.
                outputs = self.backend_model(
                    input_ids=inputs,
                    synced_gpus=True,
                    **kwargs,
                )
            else:
                raise NotImplementedError(
                    f"device \"{self.device}\" is not supported"
                )
    if kwargs.get('return_input', False):
        # NOTE(review): `return_input` also stays inside `kwargs` above and is
        # therefore forwarded to the model call — confirm this is intended.
        outputs = {"input": inputs, "output": outputs}
    return outputs
Perform generation process of the model.
Parameters
------------
inputs :
The **tokenized** sequence used as a prompt for the generation or as model inputs to the model.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
| __inference | python | OptimalScale/LMFlow | src/lmflow/models/hf_text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_text_regression_model.py | Apache-2.0 |
def __vllm_inference(
    self,
    inputs: Union[str, List[str]],
    sampling_params: Optional['SamplingParams'] = None,
    **kwargs,
) -> Union[List[List[str]], List[List[List[int]]]]:
    """Perform VLLM inference process of the model.

    Stub kept only so the interface matches other model classes; text
    regression models have no vllm backend.

    Parameters
    ----------
    inputs : Union[str, List[str]]
        Prompt(s), string or a list of strings.
    sampling_params : Optional[SamplingParams], optional
        vllm SamplingParams object, by default None.

    Raises
    ------
    NotImplementedError
        Always raised.
    """
    raise NotImplementedError(
        "VLLM inference is not supported for text regression model."
    )
Parameters
----------
inputs : Union[str, List[str]]
Prompt(s), string or a list of strings.
sampling_params : Optional[SamplingParams], optional
vllm SamplingParams object, by default None.
Returns
-------
| __vllm_inference | python | OptimalScale/LMFlow | src/lmflow/models/hf_text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_text_regression_model.py | Apache-2.0 |
def __init__(
    self,
    model_args,
    *args,
    **kwargs
):
    """
    Initializes a TextRegressionModel instance.

    :param model_args: dictionary with model arguments such as model name, path, revision, etc.
    """
    # The scoring callable is attached after construction (presumably via a
    # registration helper — confirm against callers); until then `inference`
    # is a no-op.
    self.inference_func = None
Initializes a TextRegressionModel instance.
:param model_args: dictionary with model arguments such as model name, path, revision, etc.
| __init__ | python | OptimalScale/LMFlow | src/lmflow/models/text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/text_regression_model.py | Apache-2.0 |
def inference(self, inputs: Dataset):
    """
    Gets regression results of a given dataset.

    :inputs: Dataset object, only accept type "text_only".
    """
    # No inference function registered yet: silently return None, matching
    # the historical behavior.
    if self.inference_func is None:
        return None
    return self.inference_func(inputs)
Gets regression results of a given dataset.
:inputs: Dataset object, only accept type "text_only".
| inference | python | OptimalScale/LMFlow | src/lmflow/models/text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/text_regression_model.py | Apache-2.0 |
def __init__(self,
             config: Blip2Config,
             image_encoder_name_or_path=None,
             qformer_name_or_path=None,
             language_model_name_or_path=None,
             low_resource=False,):
    '''
    TODO update the docs

    Args:
        config:
            Model configuration; the name-or-path arguments below, when
            provided, overwrite the corresponding sub-model from the config.
        image_encoder_name_or_path:
            Optional pretrained vision encoder loaded via AutoModel.
        qformer_name_or_path:
            Optional pretrained Q-Former loaded via AutoModel.
        language_model_name_or_path:
            Optional pretrained language model to load.
        low_resource:
            Load the language model in 8-bit/fp16 with an auto device map.

    Returns:
        None
    '''
    super(Blip2PreTrainedModel, self).__init__(config)
    self.custom_vision_model = getattr(
        config, "custom_vision_model", False)
    self.with_qformer = getattr(config, "with_qformer", True)
    # vision model
    if self.custom_vision_model:
        # custom vision model means the vit model customized from llava.
        self.vision_model = build_vision_tower(config)
        config.vision_config = self.vision_model.config
        self.image_processor = self.vision_model.image_processor
    elif image_encoder_name_or_path is not None:
        # use the model from transformers
        self.vision_model = AutoModel.from_pretrained(
            image_encoder_name_or_path)
        config.vision_config = self.vision_model.config
    else:
        # the default vit in Blip2
        self.vision_model = Blip2VisionModel(config.vision_config)
    if self.with_qformer:
        # The blip series models use a qformer; llava-based models don't.
        self.query_tokens = nn.Parameter(
            torch.zeros(1, config.num_query_tokens,
                        config.qformer_config.hidden_size))
        if qformer_name_or_path is not None:
            self.qformer = AutoModel.from_pretrained(
                qformer_name_or_path)
        else:
            self.qformer = Blip2QFormerModel(config.qformer_config)
    kwargs = dict()
    if language_model_name_or_path is not None:
        if low_resource:
            kwargs = dict(
                torch_dtype=torch.float16,
                load_in_8bit=True,
                device_map="auto",
                low_cpu_mem_usage=True)
        else:
            # BUGFIX: `is_deepspeed_zero3_enabled` must be *called* — the bare
            # function object is always truthy, so the previous
            # `if not is_deepspeed_zero3_enabled:` never executed this branch
            # and the device_map/dtype kwargs were silently dropped.
            if not is_deepspeed_zero3_enabled():
                kwargs = dict(device_map="auto",
                              torch_dtype=torch.float16)
        language_model = AutoModelForCausalLM.from_pretrained(
            language_model_name_or_path, **kwargs)
        config.text_config = language_model.config
    else:
        if config.use_decoder_only_language_model:
            language_model = AutoModelForCausalLM.from_config(
                config.text_config, **kwargs)
        else:
            language_model = AutoModelForSeq2SeqLM.from_config(
                config.text_config, **kwargs)
    # Update _tied_weights_keys using the base model used.
    if getattr(language_model, "_tied_weights_keys", None) is not None:
        self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
    self.language_model = language_model
    # Project either qformer features or raw vision features into the
    # language model's embedding space.
    if self.with_qformer:
        self.language_projection = nn.Linear(
            self.qformer.config.hidden_size,
            self.language_model.config.hidden_size)
    else:
        self.language_projection = nn.Linear(
            self.vision_model.hidden_size,
            self.language_model.config.hidden_size)
    # Only run HF weight init when no pretrained sub-models were loaded.
    if image_encoder_name_or_path is None and \
            language_model_name_or_path is None:
        self.post_init()
    # for deepspeed
    self.hidden_size = self.language_model.config.hidden_size
    self.config.hidden_size = self.language_model.config.hidden_size
TODO update the docs
Args:
config:
# the below variables are used to overwrite the model in config
image_encoder_name_or_path:
qformer_name_or_path:
language_model_name_or_path:
Returns:
| __init__ | python | OptimalScale/LMFlow | src/lmflow/models/vision2seq_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/vision2seq_model.py | Apache-2.0 |
def register_prompt_cache(self, prompt_ids, prompt_keys_values):
    """
    Update the prompt id and embedding for reuse in the future.

    Args:
        prompt_ids (torch.LongTensor): The id of the prompt.
        prompt_keys_values (torch.FloatTensor): The embedding of the prompt.

    Returns:
        None
    """
    # Mark the cache as populated and stash both pieces on the instance.
    self.with_prompt_cache = True
    self.prompt_ids = prompt_ids
    self.prompt_keys_values = prompt_keys_values
Update the prompt id and embedding for reuse in the future
Args:
prompt_ids (torch.LongTensor): The id of the prompt.
prompt_keys_values (torch.FloatTensor): The embedding of the prompt.
Returns:
None
| register_prompt_cache | python | OptimalScale/LMFlow | src/lmflow/models/vision2seq_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/vision2seq_model.py | Apache-2.0 |
def save_prompt_cache(self, path):
    """
    Save prompt embedding and id.

    Args:
        path: The path to save the prompt embedding and id.

    Returns:
        None
    """
    # Serialize both cached pieces together so they can be reloaded as one.
    payload = dict(
        prompt_ids=self.prompt_ids,
        prompt_keys_values=self.prompt_keys_values,
    )
    torch.save(payload, path)
Save prompt embedding and id.
Args:
path: The path to save the prompt embedding and id.
Returns:
None
| save_prompt_cache | python | OptimalScale/LMFlow | src/lmflow/models/vision2seq_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/vision2seq_model.py | Apache-2.0 |
def generate(
    self,
    pixel_values: torch.FloatTensor,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.LongTensor] = None,
    image_token_indexes: Optional[List] = None,
    one_sample_multiple_images: Optional[bool] = False,
    images: Optional[torch.LongTensor] = None,
    **generate_kwargs,
) -> torch.LongTensor:
    """
    Overrides `generate` function to be able to use the model as a conditional generator.

    Args:
        pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
            Input images to be processed.
        input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
            The sequence used as a prompt for the generation.
        attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
            Mask to avoid performing attention on padding token indices
        image_token_indexes (list, *optional*):
            The indexes for inserting the image tokens, by default [0].
        one_sample_multiple_images: (bool, *optional*):
            The flag for inference that the input batch size is 1 and contain multiple images.
        images (`torch.LongTensor`, *optional*):
            Alias for `pixel_values`; used when `pixel_values` is None.

    Returns:
        captions (list): A list of strings of length batch_size * num_captions.
    """
    # BUGFIX: replaced the mutable default argument `[0]` with a None
    # sentinel; the effective default value is unchanged.
    if image_token_indexes is None:
        image_token_indexes = [0]
    if pixel_values is None and images is not None:
        pixel_values = images
    if not one_sample_multiple_images:
        batch_size = pixel_values.shape[0]
    else:
        # A single sample carrying several images.
        batch_size = 1
    if not self.custom_vision_model:
        # do the processing as blip2 and mini gpt-4;
        image_embeds = self.vision_model(
            pixel_values, return_dict=True).last_hidden_state
        image_attention_mask = torch.ones(
            image_embeds.size()[:-1],
            dtype=torch.long,
            device=image_embeds.device)
        if self.with_qformer:
            query_tokens = self.query_tokens.expand(
                image_embeds.shape[0], -1, -1)
            query_outputs = self.qformer(
                query_embeds=query_tokens,
                encoder_hidden_states=image_embeds,
                encoder_attention_mask=image_attention_mask,
                return_dict=True,
            )
        else:
            query_outputs = image_embeds
        # NOTE(review): when `with_qformer` is False, `query_outputs` is a
        # plain tensor without `.last_hidden_state` — confirm this path is
        # only taken with qformer enabled.
        query_output = query_outputs.last_hidden_state
        language_model_inputs = self.language_projection(query_output)
        inputs_embeds, attention_mask = \
            self.processor_image_token_in_minigpt4(
                input_ids,
                language_model_inputs,
                attention_mask,
                image_token_indexes,
                pixel_values,
                batch_size)
        input_ids = None
    else:
        # do the processing in the vision model
        # language is the causallm model,
        # so use language_model.model to do the embed_tokens
        if pixel_values.dim() == 3:
            # the batch dim is missing;
            pixel_values = pixel_values[None]
        input_ids, attention_mask, past_key_values, inputs_embeds, labels = \
            self.vision_model.prepare_inputs_labels_for_multimodal(
                input_ids, attention_mask,
                None, None,
                pixel_values,
                self.language_projection,
                self.language_model.model)
        # convert the dtype.
        # FIXME check when need to do this
        inputs_embeds = inputs_embeds.to(
            device=self.language_model.lm_head.weight.device)
        inputs_embeds = inputs_embeds.to(
            self.language_model.lm_head.weight.dtype)
    outputs = self.language_model.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=attention_mask,
        **generate_kwargs,
    )
    return outputs
Overrides `generate` function to be able to use the model as a conditional generator.
Args:
pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
Input images to be processed.
input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt for the generation.
attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices
image_token_indexes (bool, *optional*):
The index for inserting the image tokens.
one_sample_multiple_images: (bool, *optional*):
The flag for inference that the input batch size is 1 and contain multiple images.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
| generate | python | OptimalScale/LMFlow | src/lmflow/models/vision2seq_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/vision2seq_model.py | Apache-2.0 |
def prepare_inputs_labels_for_multimodal(
    self, input_ids, attention_mask, past_key_values, labels, images,
    language_projection=None,
    language_model=None,
    **kwargs
):
    '''
    Splice projected image features into the text embedding sequence.

    Adapted from the LLaVA code base; should be polished.

    Each IMAGE_TOKEN_INDEX placeholder in ``input_ids`` is replaced by the
    projected features of the corresponding image, labels covering image
    positions are masked with IGNORE_INDEX, and the attention mask is
    re-padded to the new sequence length.

    Returns:
        Tuple ``(input_ids, attention_mask, past_key_values, inputs_embeds,
        labels)``; ``input_ids`` is ``None`` whenever ``inputs_embeds`` is
        produced, so the language model consumes embeddings directly.
    '''
    vision_tower = self.vision_tower
    # commonly used in model.generate (past_key_values is not None)
    # to avoid forward the image multiple time
    if vision_tower is None or images is None or input_ids.shape[1] == 1:
        # Decoding step (single new token) with a KV cache: extend the
        # attention mask to the cached length + 1 instead of re-splicing.
        if (past_key_values is not None and
            vision_tower is not None and
            images is not None and
            input_ids.shape[1] == 1):
            attention_mask = torch.ones((
                attention_mask.shape[0],
                past_key_values[-1][-1].shape[-2] + 1),
                dtype=attention_mask.dtype, device=attention_mask.device)
        return input_ids, attention_mask, past_key_values, None, labels
    # Multiple images per sample: encode as one batch, then split back.
    if type(images) is list or images.ndim == 5:
        concat_images = torch.cat([image for image in images], dim=0)
        image_features = self.encode_images(concat_images, language_projection)
        split_sizes = [image.shape[0] for image in images]
        image_features = torch.split(image_features, split_sizes, dim=0)
        image_features = [x.flatten(0, 1) for x in image_features]
    else:
        image_features = self.encode_images(images, language_projection)
    new_input_embeds = []
    new_labels = [] if labels is not None else None
    cur_image_idx = 0
    for batch_idx, cur_input_ids in enumerate(input_ids):
        if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
            # multimodal LLM, but the current sample is not multimodal
            # The zero-scaled dummy-feature term keeps the projection in the
            # autograd graph without changing the embeddings.
            cur_input_embeds = language_model.embed_tokens(cur_input_ids)
            cur_input_embeds = cur_input_embeds + (0. * language_projection(vision_tower.dummy_feature)).sum()
            new_input_embeds.append(cur_input_embeds)
            if labels is not None:
                new_labels.append(labels[batch_idx])
            cur_image_idx += 1
            continue
        image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
        cur_new_input_embeds = []
        if labels is not None:
            cur_labels = labels[batch_idx]
            cur_new_labels = []
            assert cur_labels.shape == cur_input_ids.shape
        # Consume one image placeholder per iteration, splicing the image
        # features in place of the placeholder token.
        while image_token_indices.numel() > 0:
            cur_image_features = image_features[cur_image_idx]
            image_token_start = image_token_indices[0]
            # print("image token_start", image_token_start,
            #       "curr_input_ids", cur_input_ids.shape)
            if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                # <im_start>/<im_end> wrap the image; only the wrapper token
                # embeddings stay trainable (text embeddings are detached).
                cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                cur_new_input_embeds.append(cur_image_features)
                cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                if labels is not None:
                    cur_new_labels.append(cur_labels[:image_token_start])
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                    cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                    cur_labels = cur_labels[image_token_start+2:]
            else:
                cur_input_ids = cur_input_ids.to(device=language_model.device)
                cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids[:image_token_start]))
                cur_new_input_embeds.append(cur_image_features)
                if labels is not None:
                    cur_new_labels.append(cur_labels[:image_token_start])
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                    cur_labels = cur_labels[image_token_start+1:]
            cur_image_idx += 1
            if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                cur_input_ids = cur_input_ids[image_token_start+2:]
            else:
                cur_input_ids = cur_input_ids[image_token_start+1:]
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
        # Embed any trailing text after the last image placeholder.
        if cur_input_ids.numel() > 0:
            if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids).detach())
            else:
                cur_new_input_embeds.append(language_model.embed_tokens(cur_input_ids))
        if labels is not None:
            cur_new_labels.append(cur_labels)
        cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
        cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
        new_input_embeds.append(cur_new_input_embeds)
        if labels is not None:
            cur_new_labels = torch.cat(cur_new_labels, dim=0)
            new_labels.append(cur_new_labels)
    # Samples may now differ in length (different image counts); right-pad
    # embeddings with zeros and labels with IGNORE_INDEX to the max length.
    if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
        max_len = max(x.shape[0] for x in new_input_embeds)
        new_input_embeds_align = []
        for cur_new_embed in new_input_embeds:
            cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
            new_input_embeds_align.append(cur_new_embed)
        new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
        if labels is not None:
            new_labels_align = []
            _new_labels = new_labels
            for cur_new_label in new_labels:
                cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                new_labels_align.append(cur_new_label)
            new_labels = torch.stack(new_labels_align, dim=0)
        if attention_mask is not None:
            # Left pad (True) for the inserted image positions, right pad
            # (False) for the alignment padding added above.
            new_attention_mask = []
            for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                new_attention_mask.append(cur_new_attention_mask)
            attention_mask = torch.stack(new_attention_mask, dim=0)
            assert attention_mask.shape == new_labels.shape
    else:
        new_input_embeds = torch.stack(new_input_embeds, dim=0)
        if labels is not None:
            new_labels = torch.stack(new_labels, dim=0)
        if attention_mask is not None:
            new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
            attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
            assert attention_mask.shape == new_input_embeds.shape[:2]
    return None, attention_mask, past_key_values, \
        new_input_embeds, new_labels
Copied from the LLaVA code base.
Should be polished.
| prepare_inputs_labels_for_multimodal | python | OptimalScale/LMFlow | src/lmflow/models/vision_encoder/clip_encoder.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/vision_encoder/clip_encoder.py | Apache-2.0 |
def step(self, closure=None):
    """Performs a single AdaBelief optimization step.

    AdaBelief adapts the step size by the "belief" in the observed
    gradient: the second moment tracks the deviation of the gradient from
    its running mean (``grad - exp_avg``) rather than the raw squared
    gradient. Supports AMSGrad-style max tracking, decoupled (AdamW-style)
    weight decay, and the RAdam-style rectified update.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            # cast data type
            # fp16 parameters are updated in fp32 and cast back at the end.
            half_precision = False
            if p.data.dtype == torch.float16:
                half_precision = True
                p.data = p.data.float()
                p.grad = p.grad.float()
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError(
                    'AdaBelief does not support sparse gradients, please consider SparseAdam instead')
            amsgrad = group['amsgrad']
            state = self.state[p]
            beta1, beta2 = group['betas']
            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['exp_avg_var'] = torch.zeros_like(p.data)
                if amsgrad:
                    # Maintains max of all exp. moving avg. of sq. grad. values
                    state['max_exp_avg_var'] = torch.zeros_like(p.data)
            # perform weight decay, check if decoupled weight decay
            if self.weight_decouple:
                # AdamW-style: shrink the weights directly, not the gradient.
                if not self.fixed_decay:
                    p.data.mul_(1.0 - group['lr'] * group['weight_decay'])
                else:
                    p.data.mul_(1.0 - group['weight_decay'])
            else:
                if group['weight_decay'] != 0:
                    grad.add_(p.data, alpha=group['weight_decay'])
            # get current state variable
            exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
            state['step'] += 1
            bias_correction1 = 1 - beta1 ** state['step']
            bias_correction2 = 1 - beta2 ** state['step']
            # Update first and second moment running average
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            # "Belief": deviation of the gradient from its running mean.
            grad_residual = grad - exp_avg
            exp_avg_var.mul_(beta2).addcmul_( grad_residual, grad_residual, value=1 - beta2)
            if amsgrad:
                max_exp_avg_var = state['max_exp_avg_var']
                # Maintains the maximum of all 2nd moment running avg. till now
                # NOTE(review): eps is added to exp_avg_var in place each step,
                # matching the reference AdaBelief implementation.
                torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var)
                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
            else:
                denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
            # update
            if not self.rectify:
                # Default update
                step_size = group['lr'] / bias_correction1
                p.data.addcdiv_( exp_avg, denom, value=-step_size)
            else:  # Rectified update, forked from RAdam
                # Per-step rectification terms are cached in group['buffer']
                # (shared across params with the same step count).
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    else:
                        step_size = -1
                    buffered[2] = step_size
                if N_sma >= 5:
                    denom = exp_avg_var.sqrt().add_(group['eps'])
                    p.data.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                elif step_size > 0:
                    p.data.add_( exp_avg, alpha=-step_size * group['lr'])
            if half_precision:
                p.data = p.data.half()
                p.grad = p.grad.half()
    return loss
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/adabelief.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/adabelief.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single AdaBound optimization step.

    AdaBound behaves like Adam early in training and smoothly transitions
    towards SGD by clamping the effective per-element learning rate into
    ``[lower_bound, upper_bound]``; both bounds converge to ``final_lr``
    as the step count grows.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group, base_lr in zip(self.param_groups, self.base_lrs):
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                msg = (
                    "AdaBound does not support sparse gradients, "
                    "please consider SparseAdam instead"
                )
                raise RuntimeError(msg)
            amsbound = group["amsbound"]
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                state["step"] = 0
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if amsbound:
                    # Maintains max of all exp. moving avg. of
                    # sq. grad. values
                    state["max_exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            if amsbound:
                max_exp_avg_sq = state["max_exp_avg_sq"]
            beta1, beta2 = group["betas"]
            state["step"] += 1
            # L2-style weight decay folded into the gradient (not decoupled).
            if group["weight_decay"] != 0:
                grad = grad.add(p.data, alpha=group["weight_decay"])
            # Decay the first and second moment running average coefficient
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
            if amsbound:
                # Maintains the maximum of all 2nd moment running
                # avg. till now
                torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                # Use the max. for normalizing running avg. of gradient
                denom = max_exp_avg_sq.sqrt().add_(group["eps"])
            else:
                denom = exp_avg_sq.sqrt().add_(group["eps"])
            bias_correction1 = 1 - beta1 ** state["step"]
            bias_correction2 = 1 - beta2 ** state["step"]
            step_size = (
                group["lr"]
                * math.sqrt(bias_correction2)
                / bias_correction1
            )
            # Applies bounds on actual learning rate
            # lr_scheduler cannot affect final_lr, this is a workaround
            # to apply lr decay
            final_lr = group["final_lr"] * group["lr"] / base_lr
            lower_bound = final_lr * (
                1 - 1 / (group["gamma"] * state["step"] + 1)
            )
            upper_bound = final_lr * (
                1 + 1 / (group["gamma"] * state["step"])
            )
            # Element-wise update: clamp(step_size / denom) * exp_avg.
            step_size = torch.full_like(denom, step_size)
            step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(
                exp_avg
            )
            p.data.add_(-step_size)
    return loss
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/adabound.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/adabound.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single AdamP optimization step.

    Runs a standard Adam update (optionally with Nesterov momentum), then
    for multi-dimensional parameters passes the perturbation through
    ``self._projection`` before applying it; the returned ``wd_ratio``
    rescales the decoupled weight decay.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            beta1, beta2 = group["betas"]
            nesterov = group["nesterov"]
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                state["step"] = 0
                state["exp_avg"] = torch.zeros_like(
                    p.data, memory_format=torch.preserve_format
                )
                state["exp_avg_sq"] = torch.zeros_like(
                    p.data, memory_format=torch.preserve_format
                )
            # Adam
            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            state["step"] += 1
            bias_correction1 = 1 - beta1 ** state["step"]
            bias_correction2 = 1 - beta2 ** state["step"]
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(
                group["eps"]
            )
            step_size = group["lr"] / bias_correction1
            if nesterov:
                perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
            else:
                perturb = exp_avg / denom
            # Projection
            # 1-D params (biases, norms) are not projected; wd_ratio stays 1.
            wd_ratio = 1
            if len(p.shape) > 1:
                perturb, wd_ratio = self._projection(
                    p,
                    grad,
                    perturb,
                    group["delta"],
                    group["wd_ratio"],
                    group["eps"],
                )
            # Weight decay
            # Decoupled decay, scaled by the projection's wd_ratio.
            if group["weight_decay"] > 0:
                p.data.mul_(
                    1 - group["lr"] * group["weight_decay"] * wd_ratio
                )
            # Step
            p.data.add_(perturb, alpha=-step_size)
    return loss
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/adamp.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/adamp.py | Apache-2.0 |
def step(self, closure=None):
    """Performs a single schedule-free AdamW optimization step.

    Maintains per-parameter iterates ``z`` (the raw SGD-like sequence) and
    ``y = p.data`` (the evaluation point, an interpolation towards the
    running weight average) so no external LR schedule is needed; a linear
    warmup over ``warmup_steps`` is applied to the step size. Requires the
    optimizer to be in train mode.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        eps = group['eps']
        beta1, beta2 = group['betas']
        decay = group['weight_decay']
        k = group['k']
        r = group['r']
        warmup_steps = group['warmup_steps']
        weight_lr_power = group['weight_lr_power']
        # Linear LR warmup over the first `warmup_steps` iterations.
        if k < warmup_steps:
            sched = (k+1) / warmup_steps
        else:
            sched = 1.0
        bias_correction2 = 1 - beta2 ** (k+1)
        lr = group['lr']*sched*math.sqrt(bias_correction2)
        lr_max = group['lr_max'] = max(lr, group['lr_max'])
        # ckp1 is the interpolation weight of the running average.
        weight = ((k+1)**r) * (lr_max**weight_lr_power)
        weight_sum = group['weight_sum'] = group['weight_sum'] + weight
        try:
            ckp1 = weight/weight_sum
        except ZeroDivisionError:
            ckp1 = 0
        if not group['train_mode']:
            raise Exception("Not in train mode!")
        active_p = [p for p in group['params'] if p.grad is not None]
        for p in active_p:
            if 'z' not in self.state[p]:
                self.state[p]['z'] = torch.clone(p.data)
                self.state[p]['exp_avg_sq'] = torch.zeros_like(p.data)
        if group['foreach'] and len(active_p) > 0:
            # Fused multi-tensor path via torch._foreach_* ops.
            y, grad, exp_avg_sq, z = zip(*[(p.data,
                                            p.grad,
                                            self.state[p]['exp_avg_sq'],
                                            self.state[p]['z'])
                                           for p in active_p])
            # Decay the first and second moment running average coefficient
            torch._foreach_mul_(exp_avg_sq, beta2)
            torch._foreach_addcmul_(exp_avg_sq, grad, grad, value=1-beta2)
            denom = torch._foreach_sqrt(exp_avg_sq)
            torch._foreach_add_(denom, eps)
            # Normalize grad in-place for memory efficiency
            torch._foreach_div_(grad, denom)
            # Weight decay calculated at y
            if decay != 0:
                torch._foreach_add_(grad, y, alpha=decay)
            # These operations update y in-place,
            # without computing x explicitly.
            torch._foreach_lerp_(y, z, weight=ckp1)
            torch._foreach_add_(y, grad, alpha=lr*(beta1*(1-ckp1)-1))
            # z step
            torch._foreach_sub_(z, grad, alpha=lr)
        else:
            for p in active_p:
                y = p.data  # Notation to match theory
                grad = p.grad.data
                state = self.state[p]
                z = state['z']
                exp_avg_sq = state['exp_avg_sq']
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1-beta2)
                denom = exp_avg_sq.sqrt().add_(eps)
                # Reuse grad buffer for memory efficiency
                grad_normalized = grad.div_(denom)
                # Weight decay calculated at y
                if decay != 0:
                    grad_normalized.add_(y, alpha=decay)
                # These operations update y in-place,
                # without computing x explicitly.
                y.lerp_(end=z, weight=ckp1)
                y.add_(grad_normalized, alpha=lr*(beta1*(1-ckp1)-1))
                # z step
                z.sub_(grad_normalized, alpha=lr)
        group['k'] = k+1
    return loss
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/adamw_schedule_free.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/adamw_schedule_free.py | Apache-2.0 |
def step(self, closure: Callable=None):
    """
    Performs a single (intentionally no-op) optimization step.

    The parameters themselves are left numerically unchanged (the update is
    scaled by ``-0.0``) apart from an optional weight-decay shrink; state
    bookkeeping (step counter, zeroed moment buffers) is still maintained.

    Arguments:
        closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.
    """
    loss = closure() if closure is not None else None
    for group in self.param_groups:
        for param in group["params"]:
            if param.grad is None:
                continue
            if param.grad.is_sparse:
                raise RuntimeError("Dummy does not support sparse gradients yet")
            state = self.state[param]
            if not state:
                # Lazily create the (unused) moment buffers.
                state["step"] = 0
                state["exp_avg"] = torch.zeros_like(param)
                state["exp_avg2"] = torch.zeros_like(param)
            # Read hyper-parameters for parity with a real optimizer,
            # even though the dummy update never uses them.
            beta1, beta2 = group["betas"]
            step_size = group["lr"]
            state["step"] += 1
            # Zero-scaled update: numerically a no-op.
            param.add_(state["exp_avg2"], alpha=-0.0)
            if group["weight_decay"] > 0.0:
                param.add_(param, alpha=(-group["lr"] * group["weight_decay"]))
    return loss
Performs a single optimization step.
Arguments:
closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/dummy.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/dummy.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single Lamb optimization step.

    Computes an Adam-style update, then rescales it per layer by the trust
    ratio ``||w|| / ||update||`` (LAMB), which keeps the step size
    proportional to the layer's weight norm for large-batch training.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                msg = (
                    "Lamb does not support sparse gradients, "
                    "please consider SparseAdam instead"
                )
                raise RuntimeError(msg)
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                state["step"] = 0
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            beta1, beta2 = group["betas"]
            state["step"] += 1
            # Decay the first and second moment running average coefficient
            # m_t
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            # v_t
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
            # Paper v3 does not use debiasing.
            if self.debias:
                bias_correction = math.sqrt(1 - beta2 ** state["step"])
                bias_correction /= 1 - beta1 ** state["step"]
            else:
                bias_correction = 1
            # Apply bias to lr to avoid broadcast.
            step_size = group["lr"] * bias_correction
            # Weight norm is clamped to stabilize the trust ratio.
            weight_norm = torch.norm(p.data).clamp(0, self.clamp_value)
            adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"])
            if group["weight_decay"] != 0:
                adam_step.add_(p.data, alpha=group["weight_decay"])
            adam_norm = torch.norm(adam_step)
            # Degenerate layers (zero norms) fall back to a plain Adam step.
            if weight_norm == 0 or adam_norm == 0:
                trust_ratio = 1
            else:
                trust_ratio = weight_norm / adam_norm
            # Exposed for logging/debugging.
            state["weight_norm"] = weight_norm
            state["adam_norm"] = adam_norm
            state["trust_ratio"] = trust_ratio
            if self.adam:
                trust_ratio = 1
            p.data.add_(adam_step, alpha=-step_size * trust_ratio)
    return loss
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/lamb.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/lamb.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single LARS optimization step.

    SGD with momentum whose update is rescaled per layer by the LARS trust
    coefficient ``trust_coefficient * ||w|| / (||g|| + ||w||*wd + eps)``.
    Note that in this implementation the scaling is only applied to groups
    with nonzero ``weight_decay``.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        # Closure runs with grad enabled so it can backprop.
        with torch.enable_grad():
            loss = closure()
    # exclude scaling for params with 0 weight decay
    for group in self.param_groups:
        weight_decay = group["weight_decay"]
        momentum = group["momentum"]
        dampening = group["dampening"]
        nesterov = group["nesterov"]
        for p in group["params"]:
            if p.grad is None:
                continue
            d_p = p.grad
            p_norm = torch.norm(p.data)
            g_norm = torch.norm(p.grad.data)
            # lars scaling + weight decay part
            if weight_decay != 0:
                if p_norm != 0 and g_norm != 0:
                    lars_lr = p_norm / (
                        g_norm + p_norm * weight_decay + group["eps"]
                    )
                    lars_lr *= group["trust_coefficient"]
                    d_p = d_p.add(p, alpha=weight_decay)
                    d_p *= lars_lr
            if momentum != 0:
                param_state = self.state[p]
                if "momentum_buffer" not in param_state:
                    buf = param_state["momentum_buffer"] = torch.clone(
                        d_p
                    ).detach()
                else:
                    buf = param_state["momentum_buffer"]
                buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                if nesterov:
                    d_p = d_p.add(buf, alpha=momentum)
                else:
                    d_p = buf
            p.add_(d_p, alpha=-group["lr"])
    return loss
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/lars.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/lars.py | Apache-2.0 |
def zeropower_via_newtonschulz5(G: Tensor, steps: int) -> Tensor:
    """
    Approximately orthogonalize ``G`` with a quintic Newton-Schulz iteration.

    The coefficients of the quintic are chosen to maximize the slope at zero
    rather than to converge exactly, so the result is not UV^T from the SVD
    USV^T but roughly US'V^T with S' diagonal and S'_{ii} ~ Uniform(0.5, 1.5);
    empirically this does not hurt model quality. Computation runs in
    bfloat16. The last two dimensions are treated as the matrix; leading
    dimensions are batched.
    """
    assert G.ndim >= 2  # batched Muon implementation by @scottjmaddox, and put into practice in the record by @YouJiacheng
    # Quintic p(x) = a*x + b*x^3 + c*x^5, slope-maximizing coefficients.
    coef_a, coef_b, coef_c = 3.4445, -4.7750, 2.0315
    tall = G.size(-2) > G.size(-1)
    Y = G.bfloat16()
    # Iterate on the wide orientation; transpose back at the end.
    if tall:
        Y = Y.mT
    # Ensure the spectral norm is at most ~1, required for convergence.
    Y = Y / (Y.norm(dim=(-2, -1), keepdim=True) + 1e-7)
    # Perform the NS iterations.
    for _ in range(steps):
        gram = Y @ Y.mT
        poly = coef_b * gram + coef_c * gram @ gram  # quintic computation strategy adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
        Y = coef_a * Y + poly @ Y
    if tall:
        Y = Y.mT
    return Y
Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
zero even beyond the point where the iteration no longer converges all the way to one everywhere
on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
performance at all relative to UV^T, where USV^T = G is the SVD.
| zeropower_via_newtonschulz5 | python | OptimalScale/LMFlow | src/lmflow/optim/muon.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/muon.py | Apache-2.0 |
def step(self, closure=None):
    """
    Performs a single Muon-style optimization step.

    Runs an Adam-style moment update, orthogonalizes the resulting update
    direction for >=2-D parameters via Newton-Schulz iteration
    (``zeropower_via_newtonschulz5``), applies it, and finishes with
    decoupled weight decay.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad
            state = self.state[p]
            # Initialize state
            if len(state) == 0:
                state['step'] = 0
                state['exp_avg'] = torch.zeros_like(p)
                state['exp_avg_sq'] = torch.zeros_like(p)
            exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
            beta1, beta2 = group['betas']
            state['step'] += 1
            bias_correction1 = 1 - beta1 ** state['step']
            bias_correction2 = 1 - beta2 ** state['step']
            # Update momentum and squared gradient
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
            # Compute the update
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
            step_size = group['lr'] / bias_correction1
            # Orthogonalize the update
            # 1-D params (biases, norm scales) skip the Newton-Schulz step.
            update = exp_avg / denom
            if update.ndim >= 2:
                update = zeropower_via_newtonschulz5(update, steps=group['ns_steps'])
            # Apply the update
            p.add_(update, alpha=-step_size)
            # Apply weight decay
            # Decoupled (AdamW-style), applied after the main update.
            if group['weight_decay'] != 0:
                p.add_(p, alpha=-group['lr'] * group['weight_decay'])
    return loss
Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/muon.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/muon.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single RAdam optimization step.

    Rectified Adam: when the variance estimate is reliable (approximated
    SMA length ``N_sma >= 5``) the step is rectified and divided by the
    adaptive denominator; otherwise it degrades to an SGD-with-momentum
    style step. Updates are computed in fp32 and copied back.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        lr = group["lr"]
        weight_decay = group["weight_decay"]
        beta1, beta2 = group["betas"]
        eps = group["eps"]
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data.float()
            if grad.is_sparse:
                msg = (
                    "RAdam does not support sparse gradients, "
                    "please consider SparseAdam instead"
                )
                raise RuntimeError(msg)
            p_data_fp32 = p.data.float()
            state = self.state[p]
            if len(state) == 0:
                state["step"] = 0
                state["exp_avg"] = torch.zeros_like(
                    p_data_fp32, memory_format=torch.preserve_format
                )
                state["exp_avg_sq"] = torch.zeros_like(
                    p_data_fp32, memory_format=torch.preserve_format
                )
            else:
                # Keep moment buffers in the fp32 working dtype.
                state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                state["exp_avg_sq"] = state["exp_avg_sq"].type_as(
                    p_data_fp32
                )
            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            state["step"] += 1
            # Rectification terms are cached per step index in group["buffer"].
            buffered = group["buffer"][int(state["step"] % 10)]
            if state["step"] == buffered[0]:
                N_sma, step_size = buffered[1], buffered[2]
            else:
                buffered[0] = state["step"]
                beta2_t = beta2 ** state["step"]
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state["step"] * beta2_t / (
                    1 - beta2_t
                )
                buffered[1] = N_sma
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    step_size = (
                        lr
                        * math.sqrt(
                            (1 - beta2_t)
                            * (N_sma - 4)
                            / (N_sma_max - 4)
                            * (N_sma - 2)
                            / N_sma
                            * N_sma_max
                            / (N_sma_max - 2)
                        )
                        / (1 - beta1 ** state["step"])
                    )
                else:
                    step_size = lr / (1 - beta1 ** state["step"])
                buffered[2] = step_size
            # L2-style weight decay applied directly to the weights.
            if weight_decay != 0:
                p_data_fp32.add_(p_data_fp32, alpha=-weight_decay * lr)
            # more conservative since it's an approximated value
            if N_sma >= 5:
                denom = exp_avg_sq.sqrt().add_(eps)
                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
            else:
                p_data_fp32.add_(exp_avg, alpha=-step_size)
            p.data.copy_(p_data_fp32)
    return loss
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/radam.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/radam.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single SGDP optimization step.

    SGD with momentum whose update is passed through ``self._projection``
    for multi-dimensional parameters; the returned ``wd_ratio`` rescales
    the decoupled weight decay.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        weight_decay = group["weight_decay"]
        momentum = group["momentum"]
        dampening = group["dampening"]
        nesterov = group["nesterov"]
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            state = self.state[p]
            # State initialization
            if len(state) == 0:
                state["momentum"] = torch.zeros_like(
                    p.data, memory_format=torch.preserve_format
                )
            # SGD
            buf = state["momentum"]
            buf.mul_(momentum).add_(grad, alpha=1 - dampening)
            if nesterov:
                d_p = grad + momentum * buf
            else:
                d_p = buf
            # Projection
            # 1-D params (biases, norm scales) are not projected.
            wd_ratio = 1
            if len(p.shape) > 1:
                d_p, wd_ratio = self._projection(
                    p,
                    grad,
                    d_p,
                    group["delta"],
                    group["wd_ratio"],
                    group["eps"],
                )
            # Weight decay
            # Decoupled decay, scaled by wd_ratio and 1/(1 - momentum).
            if weight_decay != 0:
                p.data.mul_(
                    1
                    - group["lr"]
                    * group["weight_decay"]
                    * wd_ratio
                    / (1 - momentum)
                )
            # Step
            p.data.add_(d_p, alpha=-group["lr"])
    return loss
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/sgdp.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/sgdp.py | Apache-2.0 |
def step(self, closure=None):
    """Performs a single schedule-free SGD optimization step.

    Maintains per-parameter iterates ``z`` (the raw SGD sequence) and
    ``y = p.data`` (an interpolation towards the running weight average),
    removing the need for an external LR schedule; a linear warmup over
    ``warmup_steps`` is applied. Requires the optimizer to be in train mode.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from ``closure``, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        momentum = group['momentum']
        lr = group['lr']
        weight_decay = group['weight_decay']
        k = group['k']
        warmup_steps = group['warmup_steps']
        # Linear LR warmup over the first `warmup_steps` iterations.
        if k < warmup_steps:
            sched = (k+1) / warmup_steps
        else:
            sched = 1.0
        lr = group['lr']*sched
        weight_lr_power = group['weight_lr_power']
        r = group['r']
        lr_max = group['lr_max'] = max(lr, group['lr_max'])
        # ckp1 is the interpolation weight of the running average.
        weight = ((k+1)**r) * (lr_max**weight_lr_power)
        weight_sum = group['weight_sum'] = group['weight_sum'] + weight
        try:
            ckp1 = weight/weight_sum
        except ZeroDivisionError:
            ckp1 = 0
        if not group['train_mode']:
            raise Exception("Not in train mode!")
        active_p = [p for p in group['params'] if p.grad is not None]
        for p in active_p:
            if 'z' not in self.state[p]:
                self.state[p]['z'] = torch.clone(p.data)
        if group['foreach'] and len(active_p) > 0:
            # Fused multi-tensor path via torch._foreach_* ops.
            y, grad, z = zip(*[(p.data, p.grad, self.state[p]['z'])
                               for p in active_p])
            # Apply weight decay
            if weight_decay != 0:
                torch._foreach_add_(grad, y, alpha=weight_decay)
            # These operations update y in-place,
            # without computing x explicitly.
            torch._foreach_lerp_(y, z, weight=ckp1)
            torch._foreach_add_(y, grad, alpha=lr*(momentum*(1-ckp1)-1))
            # SGD step
            torch._foreach_sub_(z, grad, alpha=lr)
        else:
            for p in active_p:
                y = p.data  # Notation to match theory
                grad = p.grad.data
                z = self.state[p]['z']
                # Apply weight decay
                if weight_decay != 0:
                    grad.add_(y, alpha=weight_decay)
                # These operations update y in-place,
                # without computing x explicitly.
                y.lerp_(end=z, weight=ckp1)
                y.add_(grad, alpha=lr*(momentum*(1-ckp1)-1))
                # SGD step
                z.sub_(grad, alpha=lr)
        group['k'] = k+1
    return loss
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/sgd_schedule_free.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/sgd_schedule_free.py | Apache-2.0 |
def step(self, closure = None):
    r"""Performs a single optimization step of the Yogi optimizer.

    Yogi (Zaheer et al., NeurIPS 2018) is an Adam variant that updates the
    second-moment estimate additively (with a sign term) instead of
    multiplicatively, which bounds how fast the effective step size can change.

    Arguments:
        closure: A closure that reevaluates the model and returns the loss.

    Returns:
        The loss returned by ``closure``, or ``None`` when no closure is given.
    """
    loss = None
    if closure is not None:
        loss = closure()
    for group in self.param_groups:
        for p in group["params"]:
            # Parameters without a gradient this step are skipped entirely.
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError(
                    "Yogi does not support sparse gradients, "
                    "please consider SparseAdam instead"
                )
            state = self.state[p]
            # State initialization
            # Followed from official implementation in tensorflow addons:
            # https://github.com/tensorflow/addons/blob/master/tensorflow_addons/optimizers/yogi.py#L118 # noqa
            # For more details refer to the discussion:
            # https://github.com/jettify/pytorch-optimizer/issues/77
            if len(state) == 0:
                state["step"] = 0
                # Exponential moving average of gradient values, initialized
                # to a small constant rather than zero (per the reference impl).
                state["exp_avg"] = nn.init.constant_(
                    torch.empty_like(
                        p.data, memory_format=torch.preserve_format
                    ),
                    group["initial_accumulator"],
                )
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = nn.init.constant_(
                    torch.empty_like(
                        p.data, memory_format=torch.preserve_format
                    ),
                    group["initial_accumulator"],
                )
            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            beta1, beta2 = group["betas"]
            state["step"] += 1
            # Adam-style bias corrections for the running moment estimates.
            bias_correction1 = 1 - beta1 ** state["step"]
            bias_correction2 = 1 - beta2 ** state["step"]
            if group["weight_decay"] != 0:
                # L2-style (coupled) weight decay folded into the gradient.
                grad = grad.add(p.data, alpha=group["weight_decay"])
            # Decay the first and second moment running average coefficient
            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
            grad_squared = grad.mul(grad)
            # Yogi's signed second-moment update: move exp_avg_sq toward
            # grad^2 by a step whose sign depends on their difference.
            exp_avg_sq.addcmul_(
                torch.sign(exp_avg_sq - grad_squared),
                grad_squared,
                value=-(1 - beta2),
            )
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(
                group["eps"]
            )
            step_size = group["lr"] / bias_correction1
            # In-place parameter update: p -= step_size * exp_avg / denom.
            p.data.addcdiv_(exp_avg, denom, value=-step_size)
    return loss | Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
| step | python | OptimalScale/LMFlow | src/lmflow/optim/yogi.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/optim/yogi.py | Apache-2.0 |
def convert_to_paired_dataset(
    self,
    source_dataset: Dataset,
    sampling_paired_method: str="random",
    length_penalty: float=0.0,
    margin_scale: float=1.0,
    use_fast: bool=False,
) -> Dataset:
    """Convert a scored one to multiple (text_to_scored_textlist) to a paired dataset by rejection sampling.

    For every sample, the candidate outputs' scores are length-penalized,
    then a (chosen, rejected) pair is picked via
    ``sampling_paired_idx_from_rewards``; the raw (un-penalized) score gap,
    scaled by ``margin_scale``, is stored as the pair's margin.
    """
    output_dict = {
        KEY_INSTANCES: []
    }
    # Only the "text_to_scored_textlist" source type is mapped to a paired
    # output type here; other types leave KEY_TYPE unset.
    if source_dataset.get_type() in ["text_to_scored_textlist"]:
        output_dict[KEY_TYPE] = "paired_text_to_text"
    for sample in tqdm(source_dataset.get_backend_dataset(), desc="Converting to paired dataset"):
        sample_output_dict = {}
        lengths = self._calc_response_lengths(sample["output"], source_dataset.get_type())
        # Penalize (or reward, if length_penalty < 0) longer responses before
        # picking the pair, so the sampling is length-aware.
        penalized_rewards = self._calc_reward_with_length_penalty(
            rewards=[content[KEY_SCORE] for content in sample["output"]],
            lengths=lengths,
            length_penalty=length_penalty
        )
        chosen_idx, rejected_idx = self.sampling_paired_idx_from_rewards(
            rewards=penalized_rewards,
            sampling_paired_method=sampling_paired_method,
            use_fast=use_fast
        )
        sample_output_dict["prompt"] = sample["input"]
        sample_output_dict["chosen"] = sample["output"][chosen_idx]["text"]
        sample_output_dict["rejected"] = sample["output"][rejected_idx]["text"]
        # NOTE: margin uses the ORIGINAL scores, not the penalized rewards.
        sample_output_dict["margin"] = (sample["output"][chosen_idx][KEY_SCORE] - sample["output"][rejected_idx][KEY_SCORE]) * margin_scale
        output_dict[KEY_INSTANCES].append(sample_output_dict)
    # Build a fresh Dataset that inherits the source's data args but gets a
    # "paired_"-prefixed name and no on-disk path.
    output_dataset_args = copy.deepcopy(source_dataset.data_args)
    output_dataset_args.dataset_path = None
    output_dataset_args.dataset_name = f"paired_{output_dataset_args.dataset_name}"
    output_dataset = Dataset(output_dataset_args)
    output_dataset = output_dataset.from_dict(output_dict)
    return output_dataset | Convert a scored one to multiple (text_to_scored_textlist) to a paired dataset by rejection sampling.
| convert_to_paired_dataset | python | OptimalScale/LMFlow | src/lmflow/pipeline/dpov2_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/dpov2_aligner.py | Apache-2.0 |
def _calc_reward_with_length_penalty(
    self,
    rewards: List[float],
    lengths: List[int],
    length_penalty: float,
) -> List[float]:
    """When length_penalty > 0, penalize the longer sequence by subtracting
    length_penalty * length from the reward. Vice versa when length_penalty < 0.

    ``rewards`` and ``lengths`` must be element-aligned; a new list is
    returned and the inputs are left unmodified.
    """
    assert len(rewards) == len(lengths), "The number of rewards and lengths should be the same."
    # Linear penalty: reward_i - length_penalty * length_i for each response.
    return [reward - length_penalty * length for reward, length in zip(rewards, lengths)] | When length_penalty > 0, penalize the longer sequence by subtracting
length_penalty * length from the reward. Vice versa when length_penalty < 0.
| _calc_reward_with_length_penalty | python | OptimalScale/LMFlow | src/lmflow/pipeline/dpov2_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/dpov2_aligner.py | Apache-2.0 |
def sampling_paired_idx_from_rewards(
    self,
    rewards: List[float],
    sampling_paired_method: str="random",
    use_fast: bool=False,
) -> Tuple[int, int]:
    """Prepare the dataset for DPO training by rejection sampling.
    We implement different strategies to select pairs, including
    random: randomly select two instances
    max_min: best v.s. worst
    max_max: best v.s. second best
    max_random: best v.s. random from the remaining

    Returns:
        A ``(chosen_idx, rejected_idx)`` tuple indexing into ``rewards``.
    """
    # Thin dispatcher: the "fast" variant trades exactness/shuffling details
    # for speed; both share the same strategy names.
    if use_fast:
        return self._sampling_paired_idx_from_rewards_fast(rewards, sampling_paired_method)
    else:
        return self._sampling_paired_idx_from_rewards(rewards, sampling_paired_method) | Prepare the dataset for DPO training by rejection sampling.
We implement different strategies to select pairs, including
random: randomly select two instances
max_min: best v.s. worst
max_max: best v.s. second best
max_random: best v.s. random from the remaining
| sampling_paired_idx_from_rewards | python | OptimalScale/LMFlow | src/lmflow/pipeline/dpov2_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/dpov2_aligner.py | Apache-2.0 |
def get_paired_dataset(
    data_root: str,
    data_dir: str,
    sanity_check: bool = False,
    cache_dir: Optional[str] = None,
    num_proc=24,
) -> Dataset:
    """Load dataset and convert it to the necessary format.
    The dataset is converted to a dictionary with the following structure:
    {
        'prompt': List[str],
        'chosen': List[str],
        'rejected': List[str],
    }
    Prompts are structured as follows:
    "Question: " + <prompt> + "\n\nAnswer: "

    Args:
        data_root: Root directory passed to ``datasets.load_dataset``.
        data_dir: Subdirectory (under ``data_root``) whose ``*.json`` files
            are loaded as the train split.
        sanity_check: If True, keep at most 1000 rows for a quick dry run.
        cache_dir: Optional HuggingFace datasets cache directory.
        num_proc: Number of worker processes for the ``map`` conversion.
    """
    data_path = Path(data_root) / data_dir
    # Collect every JSON shard in the target directory as absolute paths.
    data_files = [
        x.absolute().as_posix()
        for x in data_path.glob("*.json")
    ]
    dataset = load_dataset(
        path=data_root,
        split="train",
        data_files=data_files,
        cache_dir=cache_dir,
    )
    original_columns = dataset.column_names
    if sanity_check:
        dataset = dataset.select(range(min(len(dataset), 1000)))
    def return_prompt_and_responses(samples) -> Dict[str, str]:
        # Batched map callback: samples is a dict of column -> list.
        # response_j / response_k are assumed to be the preferred / rejected
        # answers respectively (StackExchange-paired convention).
        return {
            "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]],
            "chosen": samples["response_j"],
            "rejected": samples["response_k"],
        }
    # Drop the original columns so only prompt/chosen/rejected remain.
    return dataset.map(
        return_prompt_and_responses,
        batched=True,
        num_proc=num_proc,
        remove_columns=original_columns,
    ) | Load dataset and convert it to the necessary format.
The dataset is converted to a dictionary with the following structure:
{
'prompt': List[str],
'chosen': List[str],
'rejected': List[str],
}
Prompts are structured as follows:
"Question: " + <prompt> + "
Answer: "
| get_paired_dataset | python | OptimalScale/LMFlow | src/lmflow/pipeline/dpo_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/dpo_aligner.py | Apache-2.0 |
def evaluate(
    self,
    model,
    dataset: Dataset,
    metric = "accuracy",
    verbose=True,
):
    """
    Perform Evaluation for a model

    Parameters
    ------------
    model : TunableModel object.
        TunableModel to perform inference
    dataset : Dataset object.
    metric : str, one of {"acc", "accuracy", "ppl", "perplexity",
        "nll", "neg_log_likelihood"}.
    verbose : bool, forwarded to the underlying evaluator.

    Returns
    ------------
    The computed metric value (accuracy, perplexity, or NLL).

    Raises
    ------------
    NotImplementedError : if ``metric`` is not one of the supported names.
    """
    if metric in ["acc", "accuracy"]:
        # Accuracy has two backends; pick per evaluator configuration.
        if self.evaluator_args.use_accelerator_for_evaluator:
            acc = self._evaluate_acc_with_accelerator(model, dataset, verbose=verbose)
        else:
            acc = self._evaluate_acc_with_deepspeed(model, dataset, verbose=verbose)
        print(f"Evaluating final accuracy: {acc}")
        return acc
    elif metric in ["ppl", "perplexity"]:
        ppl = self._evaluate_ppl(model, dataset, verbose=verbose)
        print(f"Evaluating final perplexity: {ppl}")
        return ppl
    elif metric in ["nll", "neg_log_likelihood"]:
        nll = self._evaluate_nll(model, dataset, verbose=verbose)
        print(f"Evaluating final negative log likelihood: {nll}")
        return nll
    else:
        raise NotImplementedError(f"metric {metric} is not supported") |
Perform Evaluation for a model
Parameters
------------
model : TunableModel object.
TunableModel to perform inference
dataset : Dataset object.
| evaluate | python | OptimalScale/LMFlow | src/lmflow/pipeline/evaluator.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/evaluator.py | Apache-2.0 |
def _evaluate_nll(
    self,
    model,
    dataset: Dataset,
    verbose=True,
):
    """
    Evaluates negative log likelihood of the model over a dataset.

    NLL = -1/N sum_{i=1}^N sum_{j=1}^|w_i| ln(p(w_{i,j}|context_window)),

    where N is the number of data samples, w_{i,j} is the j-th token in
    i-th sample. Here "context_window" = p(w_{i,start}, w_{i,start+1}, ...,
    p_{i,j-1} with start = max(0, j - window_length + 1). "window_length"
    is normally the maximum length accepted by the model.

    Returns:
        A float which represents the negative log likelihood.
    """
    data_dict = dataset.to_dict()
    # Handles prompt structure
    if dataset.get_type() == "text2text":
        # Wrap each input with the evaluator's prompt template before tokenizing.
        prompt = self.evaluator_args.prompt_structure
        data_dict["instances"] = [
            {
                "input": prompt.format(input=instance["input"]),
                "output": instance["output"]
            }
            for instance in data_dict["instances"]
        ]
        dataset = dataset.from_dict(data_dict)
    tokenized_dataset = model.tokenize(dataset, add_special_tokens=False)
    tokenized_dataset = tokenized_dataset.get_backend_dataset()
    # One {input_ids, labels} pair of shape (1, seq_len) per sample.
    encoding_list = [
        {
            "input_ids": torch.tensor([input_ids]),
            "labels": torch.tensor([labels]),
        }
        for input_ids, labels in zip(tokenized_dataset["input_ids"],
                                    tokenized_dataset["labels"])
    ]
    # Gets context window length
    try:
        max_length = min(model.get_backend_model().config.n_positions,
                         model.get_max_length())
    except:
        # NOTE(review): bare except silently covers any error (not just a
        # missing `n_positions` attribute) — consider `except AttributeError`.
        max_length = min(1024, model.get_max_length())
    nlls = []        # NLL over label (output) tokens only
    full_nlls = []   # NLL over all tokens (input + output)
    num_samples = len(encoding_list)
    for sample_idx, encodings in enumerate(encoding_list):
        seq_len = encodings["input_ids"].size(1)
        prev_end_loc = 0
        # Slide a window of up to `max_length` tokens over the sequence,
        # advancing by `self.block_size` each step.
        for begin_loc in range(0, seq_len, self.block_size):
            end_loc = min(begin_loc + max_length, seq_len)
            # may be different from block_size on last loop
            trg_len = end_loc - prev_end_loc
            input_ids = encodings["input_ids"][:, begin_loc:end_loc]
            input_ids = input_ids.to(device=self.local_rank)
            labels = encodings["labels"][:, begin_loc:end_loc]
            target_ids = labels.clone()
            full_target_ids = input_ids.clone()
            def get_nll(label_ids, nll_list):
                # Mask everything except the trailing `trg_len` new tokens so
                # overlapping windows are not double-counted.
                label_ids[:, :-trg_len] = -100
                label_ids = label_ids.to(device=self.local_rank)
                # Valid labels are from 0 to `vocab_size`
                num_valid_labels = torch.count_nonzero(label_ids >= 0)
                # The model shifts labels left by one internally, so the first
                # valid position never contributes a loss term.
                if label_ids[0, 0] != -100:
                    num_valid_labels -= 1
                if not torch.all(label_ids == -100):
                    with torch.no_grad():
                        outputs = model.get_backend_model()(
                            input_ids, labels=label_ids
                        )
                        # loss is calculated using CrossEntropyLoss which
                        # sums over valid labels N.B. the model only
                        # calculates loss over trg_len - 1 labels, because
                        # it internally shifts the labels to the left by 1.
                        neg_log_likelihood = outputs.loss * num_valid_labels
                else:
                    # Window contains no valid labels: contribute zero.
                    neg_log_likelihood = torch.zeros([]).to(
                        device=self.local_rank
                    )
                nll_list.append(neg_log_likelihood)
            get_nll(target_ids, nlls)
            get_nll(full_target_ids, full_nlls)
            # Running averages over samples processed so far (for logging).
            current_output_nll = torch.stack(nlls).sum() / (sample_idx + 1)
            current_full_nll = torch.stack(full_nlls).sum() / (sample_idx + 1)
            prev_end_loc = end_loc
            if verbose:
                if dataset.get_type() == "text_only":
                    print(
                        f"Evaluating negative log likelihood:"
                        f" {sample_idx + 1} / {num_samples} Complete,"
                        f" current nll: {current_full_nll}"
                    )
                elif dataset.get_type() == "text2text":
                    print(
                        f"Evaluating negative log likelihood:"
                        f" {sample_idx + 1} / {num_samples} Complete,"
                        f" current full nll / input nll / output nll:"
                        f" {current_full_nll} /"
                        f" {current_full_nll - current_output_nll} /"
                        f" {current_output_nll}"
                    )
                else:
                    # NOTE(review): "f{dataset.get_type()}" below is a plain
                    # string with a stray 'f' inside — the f-prefix is
                    # misplaced, so the type is never interpolated.
                    raise NotImplementedError(
                        "f{dataset.get_type()} typed datasets are not"
                        " supported"
                    )
            if end_loc == seq_len:
                break
    # Mean over samples of the summed per-token NLL on output tokens.
    mean_nll = torch.stack(nlls).sum() / num_samples
    return mean_nll |
Evaluates negative log likelihood of the model over a dataset.
NLL = -1/N sum_{i=1}^N sum_{j=1}^|w_i| ln(p(w_{i,j}|context_window)),
where N is the number of data samples, w_{i,j} is the j-th token in
i-th sample. Here "context_window" = p(w_{i,start}, w_{i,start+1}, ...,
p_{i,j-1} with start = max(0, j - window_length + 1). "window_length"
is normally the maximum length accepted by the model.
Returns:
A float which represents the negative log likelihood.
| _evaluate_nll | python | OptimalScale/LMFlow | src/lmflow/pipeline/evaluator.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/evaluator.py | Apache-2.0 |
def group_text(self, tokenized_datasets, model_max_length):
    """
    Groups texts together to form blocks of maximum length `model_max_length` and returns the processed data as
    a dictionary.

    Block size resolution: if ``data_args.block_size`` is unset, use
    ``model_max_length`` capped at 1024; otherwise use the user value,
    clamped to ``model_max_length`` unless
    ``model_args.truncate_to_model_max_length`` is False.
    """
    data_args = self.data_args
    finetuner_args = self.finetuner_args
    if data_args.block_size is None:
        block_size = model_max_length
        if block_size > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is"
                " longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size`"
                " up to `tokenizer.model_max_length` you can override this "
                " default with `--block_size xxx`."
            )
        block_size = 1024
    else:
        if data_args.block_size > model_max_length:
            if self.model_args.truncate_to_model_max_length:
                logger.warning(
                    f"The block_size passed ({data_args.block_size}) is larger"
                    f" than the maximum length for the model"
                    f"({model_max_length})."
                    f" Using block_size={model_max_length}."
                    f"If you would like to use a longer 'block_size' that is"
                    f" longer than the maximum length supported by the model,"
                    f" you can override this behavior with"
                    f"default with `--truncate_to_model_max_length False`."
                )
                block_size = model_max_length
            else:
                logger.warning(
                    f"The block_size passed ({data_args.block_size}) is larger"
                    f"than the maximum length for the model"
                    f"({model_max_length})."
                    f"Using block_size={data_args.block_size}.")
                block_size = data_args.block_size
        else:
            block_size = data_args.block_size
    # Main data processing function that will concatenate all texts from
    # our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model
        # supported it instead of this drop, you can customize this part to
        # your needs.
        total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        return result
    # Note that with `batched=True`, this map processes 1,000 texts
    # together, so group_texts throws away a remainder for each of those
    # groups of 1,000 texts. You can adjust that batch_size here but a
    # higher value might be slower to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation
    # of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    with finetuner_args.main_process_first(desc="grouping texts together"):
        group_batch_size = data_args.group_texts_batch_size
        # batch_size=1 makes grouping a no-op per example (texts stay separate).
        if data_args.disable_group_texts:
            group_batch_size = 1
        if not data_args.streaming:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                batch_size=group_batch_size,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc=f"Grouping texts in chunks of {block_size}",
            )
        else:
            # Streaming datasets do not support num_proc / caching options.
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                batch_size=group_batch_size,
            )
    return lm_datasets |
Groups texts together to form blocks of maximum length `model_max_length` and returns the processed data as
a dictionary.
| group_text | python | OptimalScale/LMFlow | src/lmflow/pipeline/finetuner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/finetuner.py | Apache-2.0 |
def tune(self,
         model: Union[HFDecoderModel, HFTextRegressionModel, HFEncoderDecoderModel],
         dataset: Dataset,
         transform_dataset_in_place=True,
         data_collator=None):
    """
    Perform tuning for a model

    Parameters
    ------------
    model : TunableModel object.
        TunableModel to perform tuning.
    dataset:
        dataset to train model.
    transform_dataset_in_place : bool.
        If False, work on a deep copy so the caller's dataset is untouched.
    data_collator : optional.
        HF data collator; defaults to ``default_data_collator``.

    Returns
    ------------
    The (possibly LoRA-merged and saved) model.
    """
    model_args = self.model_args
    data_args = self.data_args
    finetuner_args = self.finetuner_args
    if not transform_dataset_in_place:
        dataset = copy.deepcopy(dataset)
    # Tokenization and text grouping must be done in the main process
    if dataset.backend == "custom_multi_modal":
        # Multi-modal backend tokenizes lazily; just register the processors.
        dataset.backend_dataset.register_tokenizer(
            model.tokenizer, model.image_processor)
        lm_dataset = dataset
    else:
        with finetuner_args.main_process_first(desc="dataset map tokenization"):
            tokenized_dataset = model.tokenize(dataset)
            if data_args.disable_group_texts:
                lm_dataset = tokenized_dataset
            else:
                lm_dataset = self.group_text(
                    tokenized_dataset,
                    model_max_length=model.get_max_length(),
                )
    train_dataset = lm_dataset.get_backend_dataset()
    logger.info(f"Number of train samples: {len(train_dataset)}")
    if finetuner_args.do_eval:
        # Build the eval dataset from its own path, with the same
        # tokenize/group pipeline as the train set.
        eval_dataset_args = deepcopy(data_args)
        eval_dataset_args.dataset_path = finetuner_args.eval_dataset_path
        eval_dataset = Dataset(eval_dataset_args)
        with finetuner_args.main_process_first(desc="dataset map tokenization"):
            tokenized_dataset = model.tokenize(eval_dataset)
            if data_args.disable_group_texts:
                lm_dataset = tokenized_dataset
            else:
                lm_dataset = self.group_text(
                    tokenized_dataset,
                    model_max_length=model.get_max_length(),
                )
        eval_dataset = lm_dataset.get_backend_dataset()
        logger.info(f"Number of eval samples: {len(eval_dataset)}")
        def preprocess_logits_for_metrics(logits, labels):
            if isinstance(logits, tuple):
                # Depending on the model and config, logits may contain extra tensors,
                # like past_key_values, but logits always come first
                logits = logits[0]
            return logits.argmax(dim=-1)
        metric = evaluate.load("accuracy")
        def compute_metrics(eval_preds):
            preds, labels = eval_preds
            # preds have the same shape as the labels, after the argmax(-1) has been calculated
            # by preprocess_logits_for_metrics but we need to shift the labels
            labels = labels[:, 1:].reshape(-1)
            preds = preds[:, :-1].reshape(-1)
            return metric.compute(predictions=preds, references=labels)
    if finetuner_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
    # Initialize our Trainer
    training_args = finetuner_args
    if model_args.use_lora:
        FinetuningTrainer = PeftTrainer
        trainer_callbacks = [PeftSavingCallback]
    else:
        FinetuningTrainer = Trainer
        trainer_callbacks = []
    if data_collator is None:
        data_collator = default_data_collator
    if training_args.use_customized_optim:
        # Wrap the trainer class so it creates the custom optimizer.
        BaseTrainer = FinetuningTrainer
        FinetuningTrainer = self.create_customized_optimizer(
            BaseTrainer, model_args
        )
    if training_args.use_lisa:
        # LISA: periodically re-select which transformer layers are trainable.
        class DynamicLayerActivationCallback(TrainerCallback):
            def __init__(self, n_layers, interval_steps, model):
                super().__init__()
                self.n_layers = n_layers
                self.interval_steps = interval_steps
                self.model = model
                # Determine the way to access layers based on the model type
                class_to_layers_map = {
                    'LlamaForCausalLM': 'model.model.layers',
                    'Qwen2ForCausalLM': 'model.model.layers',
                    'MistralForCausalLM': 'model.model.layers',
                    'MixtralForCausalLM': 'model.model.layers',
                    'GemmaForCausalLM': 'model.model.layers',
                    'GPT2LMHeadModel': 'model.transformer.h',
                    'HymbaForCausalLM': 'model.model.layers',
                }
                model_class_name = self.model.__class__.__name__
                if model_class_name in class_to_layers_map:
                    self.layers_attribute = class_to_layers_map[model_class_name]
                else:
                    self.layers_attribute = training_args.lisa_layers_attribute
                # NOTE(review): eval() on an attribute path is fragile; a
                # functools.reduce(getattr, ...) walk would be safer.
                self.total_layers = len(eval('self.' + self.layers_attribute))  # Dynamically execute to get the number of layers
                self.active_layers_indices = []
            def freeze_all_layers(self):
                layers = eval('self.' + self.layers_attribute)  # Dynamically execute to get layers
                for layer in layers:
                    for param in layer.parameters():
                        param.requires_grad = False
            def on_step_begin(self, args, state, control, **kwargs):
                # Check if it's time to switch active layers, including at step 0
                if state.global_step % self.interval_steps == 0:
                    self.switch_active_layers()
            def switch_active_layers(self):
                # First, disable gradients for all layers
                self.freeze_all_layers()
                # Randomly select n_layers to activate
                layers = eval('self.' + self.layers_attribute)  # Re-fetch layer references
                self.active_layers_indices = np.random.choice(range(self.total_layers), self.n_layers, replace=False)
                print(f"Activating layers at indices: {self.active_layers_indices} for the next steps.", flush=True)
                # Enable gradients only for the selected layers
                for idx in self.active_layers_indices:
                    for param in layers[idx].parameters():
                        param.requires_grad = True
        # Instantiate the callback
        dynamic_layer_activation_callback = DynamicLayerActivationCallback(
            n_layers=training_args.lisa_activated_layers,  # Number of layers to activate
            interval_steps=training_args.lisa_interval_steps,  # Step interval to update active layers
            model=model.get_backend_model()
        )
        trainer_callbacks.append(dynamic_layer_activation_callback)
    trainer = FinetuningTrainer(
        model=model.get_backend_model(),
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=model.get_tokenizer(),
        # Data collator will default to DataCollatorWithPadding, so we change it.
        data_collator=data_collator,
        compute_metrics=compute_metrics if training_args.do_eval else None,
        preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval else None,
        callbacks=trainer_callbacks
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        last_checkpoint = self.last_checkpoint
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        if not model_args.use_lora:
            trainer.save_model()  # Saves the tokenizer too for easy upload
        else:
            if model_args.save_aggregated_lora:
                model.merge_lora_weights()
            model.save(finetuner_args.output_dir, model_args.save_aggregated_lora)
        # save language_projection for multi-modal model;
        if self.finetuner_args.save_language_projection:
            language_projection_state = trainer.model.language_projection.state_dict()
            # NOTE(review): torch.save takes (obj, path) — here the path is
            # passed first, so this call appears to save the path string to a
            # tensor-named location / fail. Verify and swap the arguments.
            torch.save(
                osp.join(
                    self.finetuner_args.output_dir,
                    "language_projection.pth"),
                language_projection_state)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
    return model |
Perform tuning for a model
Parameters
------------
model : TunableModel object.
TunableModel to perform tuning.
dataset:
dataset to train model.
| tune | python | OptimalScale/LMFlow | src/lmflow/pipeline/finetuner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/finetuner.py | Apache-2.0 |
def create_dataloader(self, dataset: Dataset):
    r"""Batchlize dataset and format it to dataloader.

    Args:
        dataset (Dataset): the dataset object

    Output:
        dataloader (batchlize): the dataloader object
        dataset_size (int): the length of the dataset
    """
    if dataset.get_type() == "text_only":
        data_dict = dataset.to_dict()
        inputs = [instance["text"] for instance in data_dict["instances"] ]
    elif dataset.get_type() == "image_text":
        inputs = dataset.to_list()
    # NOTE(review): any other dataset type leaves `inputs` unbound and raises
    # NameError below — callers are expected to pre-validate the type.
    dataset_size = len(inputs)
    dataset_buf = []
    # Pair each input with its original index so outputs can be re-aligned.
    for idx in range(dataset_size):
        dataset_buf.append({
            "input": inputs[idx],
            "input_idx": idx
        })
    # Fixed batch size of 1, order preserved (no shuffling).
    dataloader = batchlize(
        dataset_buf,
        batch_size=1,
        random_shuffle=False,
    )
    return dataloader, dataset_size | Batchlize dataset and format it to dataloader.
Args:
dataset (Dataset): the dataset object
Output:
dataloader (batchlize): the dataloader object
dataset_size (int): the length of the dataset
| create_dataloader | python | OptimalScale/LMFlow | src/lmflow/pipeline/inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/inferencer.py | Apache-2.0 |
def inference(
    self,
    model,
    dataset: Dataset,
    max_new_tokens: int=100,
    temperature: float=0.0,
    prompt_structure: str='{input}',
    remove_image_flag: bool=False,
    chatbot_type: str="mini_gpt",
):
    """
    Perform inference for a model

    Parameters
    ------------
    model : TunableModel object.
        TunableModel to perform inference
    dataset : Dataset object.
    max_new_tokens : int.
        Maximum number of tokens to generate per sample.
    temperature : float.
        NOTE(review): this parameter is never used — generation reads
        ``self.inferencer_args.temperature`` instead. Confirm intent.
    prompt_structure : str.
        Template applied to each input via ``.format(input=...)``.
    remove_image_flag : bool.
        If True, split the prompt on the image placeholder and tokenize
        the segments separately (multi-modal chat handling).
    chatbot_type : str, "mini_gpt" or "llava".

    Returns:
        output_dataset: Dataset object.
    """
    if dataset.get_type() not in supported_dataset_type:
        raise NotImplementedError(
            'input dataset should have type {}'.format(
                supported_dataset_type))
    dataloader, data_size = self.create_dataloader(dataset)
    # The output dataset
    output_dict = {
        "type": "text_only",
        "instances": [
        ]
    }
    for batch_index, batch in enumerate(dataloader):
        current_batch = batch[0]  # batch size is 1
        if isinstance(current_batch['input'], str):
            # Plain text sample: just apply the prompt template.
            input = prompt_structure.format(input=current_batch['input'])
        else:
            # Multi-modal sample: template only the text field.
            input = current_batch['input']
            input['text'] = prompt_structure.format(input=input['text'])
        # NOTE(review): dead branch — `if False and ...` can never run;
        # either remove it or restore the intended condition.
        if False and 'images' in input and isinstance(input['images'], list):
            input['images'] = np.array(input['images'])
        if remove_image_flag:
            # remove the image flag <ImageHere> in tokenization;
            if chatbot_type == "mini_gpt":
                image_split_flag = "<ImageHere>"
            elif chatbot_type:
                # NOTE(review): this matches ANY truthy chatbot_type, not just
                # "llava" — presumably `elif chatbot_type == "llava":` was meant.
                image_split_flag = "<image>"
            else:
                raise NotImplementedError
            input['text'] = input['text'].split(image_split_flag)
            # TODO remove this code by update the tokenizer
            # Tokenize each text segment separately and record where the
            # image embedding(s) must be inserted between them.
            input_ids = []
            attention_mask = []
            image_token_indexes = []
            temp_input = copy.deepcopy(input)
            for idx in range(len(input['text'])):
                temp_input['text'] = input['text'][idx]
                # Special tokens (e.g. BOS) only on the first segment.
                temp_inputs = model.encode(
                    temp_input,
                    return_tensors="pt",
                    add_special_tokens=idx == 0
                ).to(device=self.local_rank)
                input_ids.append(temp_inputs['input_ids'])
                attention_mask.append(temp_inputs['attention_mask'])
                if chatbot_type == "llava":
                    # add the flag for inserting the image.
                    # TODO should merge the way of handling image flag in minigpt and llava.
                    index_tensor = torch.tensor(
                        [IMAGE_TOKEN_INDEX]
                    ).to(device=self.local_rank)
                    index_tensor = index_tensor.reshape(1, 1)
                    input_ids.append(index_tensor)
                    attention_mask.append(
                        torch.ones(1,1).to(device=self.local_rank))
                image_token_indexes.append(
                    temp_inputs["input_ids"].shape[1])
            # Drop the trailing marker added after the last segment.
            if len(image_token_indexes) > 1:
                image_token_indexes = image_token_indexes[:-1]
            if chatbot_type == "llava":
                input_ids = input_ids[:-1]
                attention_mask = attention_mask[:-1]
            inputs = temp_inputs
            inputs["input_ids"] = torch.cat(input_ids, dim=1)
            inputs["attention_mask"] = torch.cat(attention_mask, dim=1)
        else:
            if self.inferencer_args.device == "gpu":
                inputs = model.encode(
                    input, return_tensors="pt"
                ).to(device=self.local_rank)
            elif self.inferencer_args.device == "cpu":
                inputs = model.encode(
                    input, return_tensors="pt"
                ).to(device='cpu')
            else:
                raise NotImplementedError(
                    f"device \"{self.inferencer_args.device}\" is not supported"
                )
        if self.inferencer_args.use_accelerator:
            inputs = inputs.to(self.accelerator.device)
        if remove_image_flag:
            inputs["image_token_indexes"] = image_token_indexes
            inputs["one_sample_multiple_images"] = True
        if self.inferencer_args.use_accelerator:
            with self.accelerator.autocast():
                outputs = model.inference(
                    inputs,
                    max_new_tokens=max_new_tokens,
                    temperature=self.inferencer_args.temperature,
                    repetition_penalty=self.inferencer_args.repetition_penalty,
                    do_sample=self.inferencer_args.do_sample,
                    use_accelerator=True,
                )
        else:
            outputs = model.inference(
                inputs,
                max_new_tokens=max_new_tokens,
                temperature=self.inferencer_args.temperature,
                repetition_penalty=self.inferencer_args.repetition_penalty,
                do_sample=self.inferencer_args.do_sample,
            )
        # only return the generation, truncating the input
        if self.model_args.arch_type != "vision_encoder_decoder":
            text_out = model.decode(outputs[0], skip_special_tokens=True)
            prompt_length = len(model.decode(inputs[0], skip_special_tokens=True,))
            text_out = text_out[prompt_length:]
        else:
            # to avoid redundant/missing leading space problem, we use a
            # part of the input text
            input_text = inputs['input_ids'][0][-1:]
            text_out = model.decode(torch.cat([input_text, outputs[0]]), skip_special_tokens=True)
            prompt_length = len(model.decode(input_text, skip_special_tokens=True,))
            text_out = text_out[prompt_length:]
        output_dict["instances"].append({ "text": text_out })
    output_dataset = Dataset(DatasetArguments(dataset_path = None))
    output_dataset = output_dataset.from_dict(output_dict)
    return output_dataset |
Perform inference for a model
Parameters
------------
model : TunableModel object.
TunableModel to perform inference
dataset : Dataset object.
Returns:
output_dataset: Dataset object.
| inference | python | OptimalScale/LMFlow | src/lmflow/pipeline/inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/inferencer.py | Apache-2.0 |
def score_to_prob(scores: torch.Tensor,
                  temperature: float = 0.,
                  top_p: float = 1.,) -> torch.Tensor:
    """Convert scores (NOT softmaxed tensor) to probabilities with support for temperature, top-p sampling, and argmax.

    Parameters
    ----------
    scores : torch.Tensor
        Input scores.
    temperature : float, optional
        Temperature parameter for controlling randomness. Higher values make the distribution more uniform,
        lower values make it peakier. When temperature <= 1e-6, argmax is used. by default 0.0
    top_p : float, optional
        Top-p sampling parameter for controlling the cumulative probability threshold, by default 1.0 (no threshold)

    Returns
    -------
    torch.Tensor
        Probability distribution after adjustments.
    """
    assert temperature >= 0.0
    assert 0.0 < top_p <= 1.0
    if temperature <= 1e-6:
        # Degenerate case: one-hot on the argmax (greedy).
        final_prob = F.one_hot(scores.argmax(dim=1), num_classes=scores.size(1)).float()
    else:
        # NOTE(review): this divides the caller's tensor in place — callers
        # that reuse `scores` afterwards will see the scaled values.
        scores /= temperature
        if top_p < 1.0:
            # Nucleus filtering: keep the smallest prefix of sorted tokens
            # whose cumulative probability stays within top_p.
            sorted_scores, _ = torch.sort(scores, descending=True)
            probs = sorted_scores.softmax(dim=1)
            cumulative_probs = torch.cumsum(probs, dim=1)
            mask = cumulative_probs <= top_p
            # NOTE(review): if even the single most likely token exceeds
            # top_p, mask is all-False and this silently falls back to the
            # full softmax instead of keeping the top token — confirm intent.
            if mask.any():
                thresholded_probs = probs * mask
                thresholded_probs = thresholded_probs / thresholded_probs.sum(dim=1, keepdim=True)
                # Scatter the renormalized probabilities back to the tokens'
                # original positions.
                final_prob = torch.zeros_like(scores)
                final_prob.scatter_add_(1, sorted_scores.argsort(dim=1), thresholded_probs)
            else:
                final_prob = scores.softmax(dim=1)
        else:
            final_prob = scores.softmax(dim=1)
    return final_prob | Convert scores (NOT softmaxed tensor) to probabilities with support for temperature, top-p sampling, and argmax.
Parameters
----------
scores : torch.Tensor
Input scores.
temperature : float, optional
Temperature parameter for controlling randomness. Higher values make the distribution more uniform,
lower values make it peakier. When temperature <= 1e-6, argmax is used. by default 0.0
top_p : float, optional
Top-p sampling parameter for controlling the cumulative probability threshold, by default 1.0 (no threshold)
Returns
-------
torch.Tensor
Probability distribution after adjustments.
| score_to_prob | python | OptimalScale/LMFlow | src/lmflow/pipeline/inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/inferencer.py | Apache-2.0 |
def predict_next_token(model: HFDecoderModel, input_ids: torch.Tensor, num_new_tokens: int = 1):
    """Sample `num_new_tokens` continuation token(s) for `input_ids`.

    Delegates to the model's generation interface with score outputs enabled,
    so callers can read both the generated ids and per-step scores.

    NOTE(review): this is invoked as ``self.predict_next_token(...)`` with all-keyword
    arguments elsewhere, yet the signature has no ``self`` — presumably it is declared
    as a ``@staticmethod`` in the original class; confirm against the full file.
    """
    generation_kwargs = {
        "use_accelerator": True,
        "max_new_tokens": num_new_tokens,
        "return_dict_in_generate": True,
        "output_scores": True,
        "do_sample": True,
        "num_beams": 1,
    }
    return model.inference(input_ids, **generation_kwargs)
| predict_next_token | python | OptimalScale/LMFlow | src/lmflow/pipeline/inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/inferencer.py | Apache-2.0 |
def autoregressive_sampling(self,
                            input_ids: torch.Tensor,
                            model: HFDecoderModel,
                            temperature: float = 0.,
                            num_new_tokens: int = 5) -> Dict:
    """Plain one-token-at-a-time sampling loop.

    Ref: [arXiv:2211.17192v2](https://arxiv.org/abs/2211.17192) Section 2.2

    Returns a dict with "sequence" (prompt + generated ids) and "new_tokens"
    (the per-step sample records produced by ``self.sample``).
    """
    generated = input_ids
    sampled_records = []
    for _ in range(num_new_tokens):
        # Score exactly one next position, convert to a probability
        # distribution, then draw a single token from it.
        step_output = self.predict_next_token(model=model, input_ids=generated, num_new_tokens=1)
        step_prob = self.score_to_prob(step_output.scores[0], temperature=temperature)
        draw = self.sample(prob=step_prob, num_samples=1)
        sampled_records.append(draw)
        # Extend the running sequence with the freshly drawn token id.
        generated = torch.cat([generated, draw['sampled_token']], dim=1)
    return {"sequence": generated, "new_tokens": sampled_records}
| autoregressive_sampling | python | OptimalScale/LMFlow | src/lmflow/pipeline/inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/inferencer.py | Apache-2.0 |
def inference(
    self,
    model: HFDecoderModel,
    input: str,
    max_new_tokens: int=1024,
):
    """
    Perform inference for a model

    Parameters
    ------------
    model : HFDecoderModel object.
        TunableModel to perform inference
    input : str.
        The input text (i.e., the prompt) for the model.
    max_new_tokens : int.
        The maximum number of tokens to be generated by the model.

    Returns:
    output : str.
        The output text generated by the model.

    Raises:
    ValueError: if `self.inferencer_args.device` is neither "gpu" nor "cpu".
    """
    if self.inferencer_args.device == "gpu":
        input_id = model.encode(input, return_tensors="pt").to(device=self.local_rank)
    elif self.inferencer_args.device == "cpu":
        input_id = model.encode(input, return_tensors="pt").to(device='cpu')
    else:
        # Bug fix: previously any other device value fell through and crashed
        # later with UnboundLocalError on `input_id`; fail fast and clearly.
        raise ValueError(
            f"Unsupported device: {self.inferencer_args.device}, expected 'gpu' or 'cpu'"
        )
    logger.debug(f"input_id: {input_id}")
    input_length = input_id.shape[1]  # currently unused; kept for debugging/token-level slicing
    output_id = model.inference(
        input_id,
        use_accelerator=True,
        max_new_tokens=max_new_tokens,
        # pad_token_id=model.tokenizer.eos_token_id,
    )
    # logger.debug(f"output: {output_id}")
    output = model.decode(output_id[0])
    # NOTE(review): stripping the prompt via string replace is fragile if the prompt
    # text reappears inside the generation; slicing off `input_length` tokens before
    # decoding would be more robust. Left as-is to preserve existing behavior.
    output = output.replace(input,"")
    return output
Perform inference for a model
Parameters
------------
model : HFDecoderModel object.
TunableModel to perform inference
input : str.
The input text (i.e., the prompt) for the model.
max_new_tokens : int.
The maximum number of tokens to be generated by the model.
Returns:
output : str.
The output text generated by the model.
| inference | python | OptimalScale/LMFlow | src/lmflow/pipeline/inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/inferencer.py | Apache-2.0 |
def _initialize_trainer(self, model, tokenizer, training_args):
    """Build and return a :class:`RaftTrainer` for ``model``/``tokenizer``.

    The training dataset passed here is a single-row placeholder and the eval
    dataset is empty; the real per-iteration data is attached to the trainer
    later in the RAFT loop.
    """
    placeholder_train = Dataset.from_dict({"text": [ " " ] })
    return RaftTrainer(
        model=model,
        args=training_args,
        train_dataset=placeholder_train,
        eval_dataset=Dataset.from_dict({}),
        tokenizer=tokenizer,
        data_collator=default_data_collator,
        compute_metrics=None,
        preprocess_logits_for_metrics=None,
    )
This function takes the model and tokenizer as the input and initialize the trainer.
| _initialize_trainer | python | OptimalScale/LMFlow | src/lmflow/pipeline/raft_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/raft_aligner.py | Apache-2.0 |
def _load_dataset(
    self,
    selected_dataset,
    model,
    tokenizer,
    model_args,
    data_args,
    training_args,
):
    '''
    This function prepares the dataset for every iteration.

    Tokenizes the selected (reward-filtered) texts, concatenates and chunks
    them into `block_size` windows with `labels == input_ids` (causal LM
    style), and returns the resulting train split.
    '''
    raw_datasets = selected_dataset
    if training_args.do_train:
        column_names = list(raw_datasets["train"].features)
    else:
        column_names = list(raw_datasets["validation"].features)
    # Fall back to the first column when there is no "text" column.
    text_column_name = "text" if "text" in column_names else column_names[0]
    # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
    tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
    def tokenize_function(examples):
        with CaptureLogger(tok_logger) as cl:
            output = tokenizer(examples[text_column_name])
        # clm input could be much much longer than block_size
        if "Token indices sequence length is longer than the" in cl.out:
            tok_logger.warning(
                "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
                " before being passed to the model."
            )
        return output
    with training_args.main_process_first(desc="dataset map tokenization"):
        if not data_args.streaming:
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on dataset",
            )
        else:
            # Streaming datasets don't support num_proc / cache options.
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                remove_columns=column_names,
            )
    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            # NOTE(review): the warning talks about a 1024 default, but the cap
            # actually applied here is 512 — confirm which value is intended.
            block_size = 512
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)
    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        if total_length >= block_size:
            total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        result["labels"] = result["input_ids"].copy()
        return result
    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    with training_args.main_process_first(desc="grouping texts together"):
        group_batch_size = 1000
        # batch_size=1 effectively disables cross-example concatenation.
        if data_args.disable_group_texts:
            group_batch_size = 1
        if not data_args.streaming:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                batch_size=group_batch_size,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc=f"Grouping texts in chunks of {block_size}",
            )
        else:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                batch_size=group_batch_size,
            )
    if training_args.do_train:
        # NOTE(review): membership is checked on tokenized_datasets but the
        # split is taken from lm_datasets — works since map preserves splits.
        if "train" not in tokenized_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = lm_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
    return train_dataset
This function prepares the dataset for every iteration.
| _load_dataset | python | OptimalScale/LMFlow | src/lmflow/pipeline/raft_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/raft_aligner.py | Apache-2.0 |
def _load_input_dataset(self, dataset, tokenizer, max_prompt_length=256):
    """
    Load input dataset (i.e. prompt/question dataset) for training.

    Args:
        dataset: A Dataset object.
            The dataset to be loaded.
        tokenizer:
            Tokenizer used to encode each prompt (and re-decode it so that
            the stored "input" text matches the tokenizer round-trip).
        max_prompt_length: int, optional (default 256).
            Prompts longer than this many tokens are filtered out.
            (Generalizes the previously hard-coded 256.)

    Returns:
        The processed backend dataset with "input_ids" and "input" columns,
        formatted to return torch tensors.
    """
    ds = dataset.get_backend_dataset()

    def tokenize(sample):
        sample["input_ids"] = tokenizer.encode(sample["text"])
        # Re-decode so "input" reflects exactly what the tokenizer produces.
        sample['input'] = tokenizer.decode(sample["input_ids"])
        return sample

    ds = ds.map(tokenize, batched=False)
    # Drop prompts that exceed the token budget.
    ds = ds.filter(lambda x: len(x["input_ids"]) <= max_prompt_length)
    ds.set_format(type='torch')
    return ds
Load input dataset (i.e. prompt/question dataset) for training.
Args:
dataset: A Dataset object.
The dataset to be loaded.
Returns:
dataloader (`torch.utils.data.DataLoader`):
The dataloader for the dataset.
| _load_input_dataset | python | OptimalScale/LMFlow | src/lmflow/pipeline/raft_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/raft_aligner.py | Apache-2.0 |
def align(self, model, dataset, reward_model):
    """
    Perform alignment for a model

    Parameters
    ------------
    model : BaseModel object.
    dataset: Dataset object.
        Input dataset for model to generate outputs. The input and output
        will then be feed into reward model to get the reward for
        alignment.
    reward_model: RegressionModel object.

    Returns
    ------------
    The wrapped (aligned) model; also saved to ``aligner_args.output_dir``
    when that is set.
    """
    tokenizer = model.get_tokenizer()
    # Reuse EOS as padding; left-pad so generation continues from the prompt.
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id
    tokenizer.padding_side = "left"
    dataset = self._load_input_dataset(dataset, tokenizer)
    set_caching_enabled(False)
    wrapped_model = model
    model = model.get_backend_model()
    generation_kwargs = {
        "min_length": 1,
        "top_k": 0.0,
        "top_p": 1.0,
        "do_sample": True,
        "pad_token_id": tokenizer.eos_token_id,
        "temperature": 0.85,
    }
    aligner_args = self.aligner_args
    training_args = aligner_args
    model_args = self.model_args
    data_args = self.data_args
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    set_seed(42 + training_args.local_rank)
    ITERATION = aligner_args.num_raft_iteration
    collection_strategy = aligner_args.collection_strategy
    sft_batch_size = aligner_args.raft_batch_size
    if collection_strategy == "top":
        alpha = aligner_args.top_reward_percentage
        M = int(sft_batch_size / world_size / alpha)
        # Bug fix: the old unconditional `print(M, K)` raised NameError here,
        # since K is only defined for the "local" strategy.
        logger.info("collection_strategy=top, M=%d", M)
    elif collection_strategy == "local":
        K = int(1/aligner_args.top_reward_percentage)
        M = int(sft_batch_size / world_size)
        logger.info("collection_strategy=local, M=%d, K=%d", M, K)
    else:
        raise NotImplementedError("We only support two data collection strategies")
    if training_args.local_rank == 0:
        print(aligner_args)
    self.store_dir = aligner_args.output_dir
    self.reward_seq = []
    # NOTE(review): attribute name has a typo ("reawrd"); kept as-is in case
    # other parts of the file reference it.
    self.train_reawrd = []
    data_size = len(dataset['input'])
    lr = training_args.learning_rate
    random_idxs = np.arange(data_size)
    np.random.shuffle(random_idxs)
    raft_trainer = self._initialize_trainer(model, tokenizer, training_args)
    raft_trainer.train(resume_from_checkpoint=False, is_first_time=True)
    for iteration in range(ITERATION):
        # Re-seed per iteration so every rank samples differently each round.
        set_seed(666 + training_args.local_rank + world_size * (iteration+1))
        end_idx = np.min([data_size, (iteration+1) * M])
        batch_input = dataset.select(random_idxs[iteration * M : end_idx])
        # Generation phase: checkpointing off / KV cache on for speed.
        model.gradient_checkpointing_disable()
        model.config.use_cache = True
        start_time = time.time()
        if collection_strategy == "top":
            selected_dataset = self._get_batch_dataset_top(
                raft_trainer.tmp_model,
                batch_input,
                alpha,
                iteration,
                training_args.local_rank,
                output_min_length=aligner_args.output_min_length,
                output_max_length=aligner_args.output_max_length,
                infer_batch_size=aligner_args.inference_batch_size_per_device,
                generation_kwargs=generation_kwargs,
                tokenizer=tokenizer,
                training_args=training_args,
                reward_model=reward_model,
                output_reward_path=aligner_args.output_reward_path,
            )
        elif collection_strategy == "local":
            selected_dataset = self._get_batch_dataset_local(
                raft_trainer.tmp_model,
                batch_input,
                K,
                iteration,
                training_args.local_rank,
                output_min_length=aligner_args.output_min_length,
                output_max_length=aligner_args.output_max_length,
                infer_batch_size=K,
                generation_kwargs=generation_kwargs,
                tokenizer=tokenizer,
                training_args=training_args,
                reward_model=reward_model,
                output_reward_path=aligner_args.output_reward_path,
            )
        end_time = time.time()
        logger.info("It takes %.2f s to inference one stage", end_time - start_time)
        raft_trainer.train_dataset = self._load_dataset(
            selected_dataset,
            raft_trainer.tmp_model,
            tokenizer,
            model_args,
            data_args,
            training_args,
        )
        logger.info(f"iter {iteration}")
        start_time = time.time()
        # Training phase: checkpointing on / KV cache off to save memory.
        model.gradient_checkpointing_enable()
        model.config.use_cache = False
        train_result = raft_trainer.train(resume_from_checkpoint=False)
        end_time = time.time()
        logger.info("It takes %.2f s to train one stage", end_time - start_time)
        if (iteration+1) * M > data_size:
            logger.info("One epoch is completed.")
            break
    '''
    if training_args.local_rank == 0 and iteration % 2 == 0:
        wrapped_model.save(aligner_args.output_dir + "/" + "model" + str(iteration))
        print(iteration, "I save a model with", self.reward_seq[-1])
    '''
    if aligner_args.output_dir is not None:
        wrapped_model.save(aligner_args.output_dir)
    return wrapped_model
Perform alignment for a model
Parameters
------------
model : BaseModel object.
dataset: Dataset object.
Input dataset for model to generate outputs. The input and output
will then be feed into reward model to get the reward for
alignment.
reward_model: RegressionModel object.
| align | python | OptimalScale/LMFlow | src/lmflow/pipeline/raft_aligner.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/raft_aligner.py | Apache-2.0 |
def __call__(self, batch: Dict[str, np.ndarray]):
    """Score a Ray `map_batches` batch with the reward model.

    batch: Dict[str, np.ndarray]
    Example (batch size=2):
    {'input': array(['...','...'], dtype=object),
    'output': array([array(["...", "..."], dtype=object), array(['...','...'], dtype=object)], dtype=object),
    'input_ids': array([[[128000, 128006, 882, ..., 128256, 128256, 128256],
            [128000, 128006, 882, ..., 128256, 128256, 128256]],
           [[128000, 128006, 882, ..., 128256, 128256, 128256],
            [128000, 128006, 882, ..., 128256, 128256, 128256]]])}

    Returns a dict with "input" (list of prompts) and "output" (per prompt,
    a list of {"score", "text"} dicts, one per candidate output).
    """
    # The batch is managed by ray and the actual batch size may smaller than
    # inference_batch_size in config, since there may be some remainders.
    # For example, 10 examples with 2 inference instances and inference_batch_size=4,
    # there will be only 2 examples for instance 0 to run and then the
    # actual batch size changes.
    actual_batch_size = len(batch['input'])
    # Collapse (batch, num_outputs, seq_len) to (batch*num_outputs, seq_len)
    # so all candidates are scored in one forward pass.
    input_tensor = torch.LongTensor([
        [list(arr) for arr in batch['input_ids'][batch_idx]]
        for batch_idx in range(actual_batch_size)
    ]).flatten(start_dim=0, end_dim=1).to("cuda")
    batched_inference_res = self.model.inference(input_tensor).logits
    # Regroup the flat scores back per-prompt.
    batched_inference_res = batched_inference_res.to("cpu").reshape(actual_batch_size, -1, 1).squeeze(dim=-1).tolist()
    # [bs, num_output_sequences]
    batched_final_res = {
        "input": batch['input'].tolist(),
        "output": [
            [
                {"score": batched_inference_res[j][i], "text": batch["output"][j][i]}
                for i in range(len(batch['output'][j]))
            ]
            for j in range(actual_batch_size)
        ],
    } # do this since we're writing to a pandas dataframe
    return batched_final_res
Example (batch size=2):
{'input': array(['...','...'], dtype=object),
'output': array([array(["...", "..."], dtype=object), array(['...','...'], dtype=object)], dtype=object),
'input_ids': array([[[128000, 128006, 882, ..., 128256, 128256, 128256],
[128000, 128006, 882, ..., 128256, 128256, 128256]],
[[128000, 128006, 882, ..., 128256, 128256, 128256],
[128000, 128006, 882, ..., 128256, 128256, 128256]]])}
| __call__ | python | OptimalScale/LMFlow | src/lmflow/pipeline/rm_inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/rm_inferencer.py | Apache-2.0 |
def inference(
    self,
    model: HFDecoderModel,
    dataset: Dataset,
    enable_decode_inference_result: bool = True,
    release_gpu: bool = False,
    inference_args: Optional[InferencerArguments] = None,
    enable_distributed_inference: bool = False,
    **kwargs,
) -> List[VLLMInferenceResultWithInput]:
    """Perform inference using the provided model and dataset. Will save inference results if
    `save_results` is set to True in `inferencer_args`.

    Parameters
    ----------
    model : HFDecoderModel
        LMFlow HFDecoderModel object
    dataset : Dataset
        LMFlow Dataset object
    enable_decode_inference_result : bool, optional
        Whether to decode after generation, by default True.
    release_gpu : bool, optional
        Whether to release gpu resources, by default False.
    inference_args : InferencerArguments, optional
        Overrides the inferencer's default sampling parameters, by default None.
    enable_distributed_inference : bool, optional
        Whether to shard inference across multiple instances, by default False.
    **kwargs :
        For distributed inference: `distributed_inference_num_instances`
        and `inference_batch_size` (default 4).

    Returns
    -------
    List[VLLMInferenceResultWithInput]
        Return a list of VLLMInferenceResultWithInput, where each
        element contains the input prompt and the corresponding output.

        When `enable_decode_inference_result = True`, the output would be a list of strings,
        contains sampling_params.n samples for the corresponding prompt.

        When `enable_decode_inference_result = False`, return a list of list of ints
        (token ids, no decoding after generation).
    """
    if inference_args:
        logger.warning(
            "Overriding the default inference arguments with the provided arguments in .inference()"
        )
        sampling_params = self.parse_to_sampling_params(inference_args)
    else:
        sampling_params = self.sampling_params
    # vLLM's `detokenize` flag controls whether token ids are decoded to text.
    sampling_params.detokenize = enable_decode_inference_result
    model_input = model.prepare_inputs_for_inference(
        dataset=dataset,
        apply_chat_template=self.inferencer_args.apply_chat_template,
        use_vllm=self.inferencer_args.use_vllm,
        enable_distributed_inference=enable_distributed_inference,
    )
    if enable_distributed_inference:
        outputs = self._distributed_inference(
            model=model,
            model_input=model_input,
            sampling_params=sampling_params,
            num_instances=kwargs.get("distributed_inference_num_instances"),
            batch_size=kwargs.get("inference_batch_size", 4),
            release_gpu=release_gpu,
        )
    else:
        outputs = self._inference(
            model=model,
            model_input=model_input,
            sampling_params=sampling_params,
            release_gpu=release_gpu,
        )
    if self.inferencer_args.save_results:
        self.save_inference_results(outputs, self.inferencer_args.results_path)
    return outputs
`save_results` is set to True in `inferencer_args`.
Parameters
----------
model : HFDecoderModel
LMFlow HFDecoderModel object
dataset : Dataset
LMFlow Dataset object
apply_chat_template : bool, optional
Whether to apply chat template to the input, by default True.
enable_decode_inference_result : bool, optional
Whether to decode after generation, by default False.
release_gpu : bool, optional
Whether to release gpu resources, by default False.
inference_args : InferencerArguments, optional
by default None
Returns
-------
List[VLLMInferenceResultWithInput]
Return a list of VLLMInferenceResultWithInput, where each
element contains the input prompt and the corresponding output.
When `enable_decode_inference_result = True`, the output would be a list of strings,
contains sampling_params.n samples for the corresponding prompt.
When `enable_decode_inference_result = False`, return a list of list of ints
(token ids, no decoding after generation).
| inference | python | OptimalScale/LMFlow | src/lmflow/pipeline/vllm_inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/vllm_inferencer.py | Apache-2.0 |
def __call__(self, batch: Dict[str, np.ndarray]):
    """Ray `map_batches` entry point for vLLM inference.

    ``batch`` is ``{"item": np.ndarray of prompt strings}``; returns parallel
    "input"/"output" column lists (written into a pandas dataframe by Ray).
    """
    inference_records = self.model.inference(
        inputs=batch['item'],
        sampling_params=self.sampling_params,
        release_gpu=self.release_gpu,
        use_vllm=True,
    )  # this is the postprocessed output, see model.__vllm_inference
    input_column = []
    output_column = []
    for record in inference_records:
        input_column.append(record['input'])
        output_column.append(record['output'])
    return {"input": input_column, "output": output_column}
| __call__ | python | OptimalScale/LMFlow | src/lmflow/pipeline/vllm_inferencer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/vllm_inferencer.py | Apache-2.0 |
def tokenize_batch_element(
    self,
    prompt: str,
    chosen: str,
    rejected: str,
) -> Dict:
    """Tokenize a single batch element.

    At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
    in case the prompt + chosen or prompt + rejected responses is/are too long. First
    we truncate the prompt; if we're still too long, we truncate the chosen/rejected.

    We also create the labels for the chosen/rejected responses, which are of length equal to
    the sum of the length of the prompt and the chosen/rejected response, with
    label_pad_token_id for the prompt tokens.
    """
    batch = {}
    if self.is_encoder_decoder:
        raise NotImplementedError
    chosen_tokens = self.tokenizer(chosen, add_special_tokens=False)
    rejected_tokens = self.tokenizer(rejected, add_special_tokens=False)
    prompt_tokens = self.tokenizer(prompt, add_special_tokens=False)
    eos_token_id = self.tokenizer.eos_token_id
    # Get indices in list prompt_tokens["input_ids"] that equals the EOS token (often 0)
    eos_indices_prompt = [i for i, x in enumerate(prompt_tokens["input_ids"]) if x == eos_token_id]
    # attention mask these indices to eos_token_id
    # (i.e. zero out attention at stray EOS positions; with mask_prompt the
    # whole prompt's attention mask is zeroed)
    if self.mask_prompt:
        new_attention_mask = [0 for i, p in enumerate(prompt_tokens["attention_mask"])]
    else:
        new_attention_mask = [
            0 if i in eos_indices_prompt else p for i, p in enumerate(prompt_tokens["attention_mask"])
        ]
    prompt_tokens["attention_mask"] = new_attention_mask
    # do the same for chosen and rejected
    eos_indices_chosen = [i for i, x in enumerate(chosen_tokens["input_ids"]) if x == eos_token_id]
    new_attention_mask_c = [
        0 if i in eos_indices_chosen else p for i, p in enumerate(chosen_tokens["attention_mask"])
    ]
    chosen_tokens["attention_mask"] = new_attention_mask_c
    eos_indices_rejected = [i for i, x in enumerate(rejected_tokens["input_ids"]) if x == eos_token_id]
    new_attention_mask_r = [
        0 if i in eos_indices_rejected else p for i, p in enumerate(rejected_tokens["attention_mask"])
    ]
    rejected_tokens["attention_mask"] = new_attention_mask_r
    # add EOS token to end of prompt
    # (NOTE(review): appended to the *responses*, not the prompt, despite the
    # comment — this terminates each response sequence)
    chosen_tokens["input_ids"].append(self.tokenizer.eos_token_id)
    chosen_tokens["attention_mask"].append(1)
    rejected_tokens["input_ids"].append(self.tokenizer.eos_token_id)
    rejected_tokens["attention_mask"].append(1)
    longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))
    # if combined sequence is too long, truncate the prompt
    if len(prompt_tokens["input_ids"]) + longer_response_length > self.max_length:
        if self.truncation_mode == "keep_start":
            prompt_tokens = {k: v[: self.max_prompt_length] for k, v in prompt_tokens.items()}
        elif self.truncation_mode == "keep_end":
            prompt_tokens = {k: v[-self.max_prompt_length :] for k, v in prompt_tokens.items()}
        else:
            raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
    # if that's still too long, truncate the response
    if len(prompt_tokens["input_ids"]) + longer_response_length > self.max_length:
        chosen_tokens = {k: v[: self.max_length - self.max_prompt_length] for k, v in chosen_tokens.items()}
        rejected_tokens = {
            k: v[: self.max_length - self.max_prompt_length] for k, v in rejected_tokens.items()
        }
    # Create labels
    # Labels are the full prompt+response ids, with prompt positions replaced
    # by label_pad_token_id so the loss only covers the response.
    chosen_sequence_tokens = {k: prompt_tokens[k] + chosen_tokens[k] for k in chosen_tokens}
    rejected_sequence_tokens = {k: prompt_tokens[k] + rejected_tokens[k] for k in rejected_tokens}
    chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
    chosen_sequence_tokens["labels"][: len(prompt_tokens["input_ids"])] = [self.label_pad_token_id] * len(
        prompt_tokens["input_ids"]
    )
    rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
    rejected_sequence_tokens["labels"][: len(prompt_tokens["input_ids"])] = [self.label_pad_token_id] * len(
        prompt_tokens["input_ids"]
    )
    # Flatten into "{chosen,rejected,prompt}_{input_ids,attention_mask,labels}" keys.
    for k, toks in {
        "chosen": chosen_sequence_tokens,
        "rejected": rejected_sequence_tokens,
        "prompt": prompt_tokens,
    }.items():
        for type_key, tokens in toks.items():
            if type_key == "token_type_ids":
                continue
            batch[f"{k}_{type_key}"] = tokens
    batch["prompt"] = prompt
    batch["chosen"] = prompt + chosen
    batch["rejected"] = prompt + rejected
    batch["chosen_response_only"] = chosen
    batch["rejected_response_only"] = rejected
    return batch
At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
in case the prompt + chosen or prompt + rejected responses is/are too long. First
we truncate the prompt; if we're still too long, we truncate the chosen/rejected.
We also create the labels for the chosen/rejected responses, which are of length equal to
the sum of the length of the prompt and the chosen/rejected response, with
label_pad_token_id for the prompt tokens.
| tokenize_batch_element | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/dpov2_dataprocessor.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/dpov2_dataprocessor.py | Apache-2.0 |
def dpo_loss(
    self,
    policy_chosen_logps: torch.FloatTensor,
    policy_rejected_logps: torch.FloatTensor,
    reference_chosen_logps: torch.FloatTensor,
    reference_rejected_logps: torch.FloatTensor,
    reference_free: bool = False,
    margin: Optional[torch.FloatTensor] = None,
    len_penalty: float = 0,
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """Compute the DPO loss for a batch of policy and reference model log probabilities.

    Args:
        policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
        policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
        reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
        reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
        reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.
        margin: Preference margin used by the distribution-matching losses (kl/tv/hellinger/rev_kl).
        len_penalty: Length penalty term added to the reference log-ratio.

    Note:
        The temperature parameter (typically 0.1-0.5; the reference model is ignored
        as beta -> 0) is read from ``self.beta``; the loss variant from ``self.loss_type``.

    Returns:
        A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
        The losses tensor contains the DPO loss for each example in the batch.
        The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
    """
    pi_logratios = policy_chosen_logps - policy_rejected_logps
    ref_logratios = reference_chosen_logps - reference_rejected_logps + len_penalty
    if reference_free:
        ref_logratios = 0
    if self.loss_type == "sigmoid":
        # Standard DPO objective.
        logits = pi_logratios - ref_logratios
        losses = -F.logsigmoid(self.beta * logits)
    elif self.loss_type == "hinge":
        logits = pi_logratios - ref_logratios
        losses = torch.relu(1 - self.beta * logits)
    elif self.loss_type == "cross_entropy":
        logits = policy_chosen_logps - reference_chosen_logps
        losses = -F.logsigmoid(self.beta * logits)
    elif self.loss_type == "raft":
        # Plain SFT on the chosen response; no reference comparison.
        losses = -policy_chosen_logps # F.logsigmoid(self.beta * logits)
    elif self.loss_type == "ipo":
        logits = pi_logratios - ref_logratios
        # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper.
        losses = (logits - 1 / (2 * self.beta)) ** 2
    elif self.loss_type == "kl":
        logits = pi_logratios - ref_logratios
        # Clamp p below 1 to keep log(1 - p) finite.
        p = F.sigmoid(self.beta * logits)
        p = torch.minimum(p, torch.ones_like(p) * 0.999)
        p_gt = torch.exp(margin) / (1 + torch.exp(margin) + 1e-3)
        losses = p * (torch.log(p) - torch.log(p_gt)) + (1 - p) * (torch.log(1 - p) - torch.log(1 - p_gt))
    elif self.loss_type == "tv":
        logits = pi_logratios - ref_logratios
        p = F.sigmoid(self.beta * logits)
        p_gt = torch.exp(margin) / (1 + torch.exp(margin))
        losses = torch.abs(p - p_gt)
    elif self.loss_type == "hellinger":
        logits = pi_logratios - ref_logratios
        p = F.sigmoid(self.beta * logits)
        p = torch.minimum(p, torch.ones_like(p) * 0.999)
        p_gt = torch.exp(margin) / (1 + torch.exp(margin))
        losses = 0.5 * ((p**0.5 - p_gt**0.5) ** 2 + ((1 - p) ** 0.5 - (1 - p_gt) ** 0.5) ** 2)
    elif self.loss_type == "rev_kl":
        logits = pi_logratios - ref_logratios
        logp = F.logsigmoid(self.beta * logits)
        logp_neg = F.logsigmoid(-self.beta * logits)
        p_gt = F.sigmoid(margin)
        losses = -p_gt * (logp) - (1 - p_gt) * logp_neg
    else:
        raise ValueError(f"Unknown loss type: {self.loss_type}.")
    # Implicit rewards (detached; for logging/accuracy only).
    chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach()
    rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach()
    return losses, chosen_rewards, rejected_rewards
Args:
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
beta: Temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. We ignore the reference model as beta -> 0.
reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.
Returns:
A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
The losses tensor contains the DPO loss for each example in the batch.
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
| dpo_loss | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/dpov2_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/dpov2_trainer.py | Apache-2.0 |
def get_batch_metrics(
    self,
    model,
    batch: Dict[str, Union[List, torch.LongTensor]],
    train_eval: Literal["train", "eval"] = "train",
):
    """Compute the DPO loss and other metrics for the given batch of inputs for train or test.

    Returns (mean loss, metrics dict); metric keys are prefixed with "eval_"
    when ``train_eval == "eval"``.
    """
    metrics = {}
    (
        policy_chosen_logps,
        policy_rejected_logps,
        policy_chosen_logits,
        policy_rejected_logits,
    ) = self.concatenated_forward(model, batch)
    # Reference log-probs are computed without gradients; with no separate
    # ref model, the policy with its adapter disabled serves as reference.
    with torch.no_grad():
        if self.ref_model is None:
            with self.accelerator.unwrap_model(self.model).disable_adapter():
                (
                    reference_chosen_logps,
                    reference_rejected_logps,
                    _,
                    _,
                ) = self.concatenated_forward(self.model, batch)
        else:
            (
                reference_chosen_logps,
                reference_rejected_logps,
                _,
                _,
            ) = self.concatenated_forward(self.ref_model, batch)
    if self.len_penalty > 0:
        chosen_len = batch["chosen_input_ids"].shape[1] * self.len_penalty
        rejected_len = batch["rejected_input_ids"].shape[1] * self.len_penalty
        len_penalty = chosen_len - rejected_len
    else:
        # Placeholders; only len_penalty is consumed downstream.
        chosen_len = 1
        rejected_len = 1
        len_penalty = 0
    margin = torch.tensor(batch["margin"], dtype=policy_chosen_logps.dtype).to(self.accelerator.device)
    losses, chosen_rewards, rejected_rewards = self.dpo_loss(
        policy_chosen_logps,
        policy_rejected_logps,
        reference_chosen_logps,
        reference_rejected_logps,
        margin=margin,
        len_penalty=len_penalty,
    )
    # Fraction of pairs where the implicit chosen reward beats the rejected one.
    reward_accuracies = (chosen_rewards > rejected_rewards).float()
    prefix = "eval_" if train_eval == "eval" else ""
    metrics[f"{prefix}rewards/chosen"] = chosen_rewards.cpu().mean()
    metrics[f"{prefix}rewards/rejected"] = rejected_rewards.cpu().mean()
    metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.cpu().mean()
    metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).cpu().mean()
    metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().cpu().mean()
    metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().cpu().mean()
    metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().cpu().mean()
    metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().cpu().mean()
    return losses.mean(), metrics
def _save_checkpoint(self, _, trial, metrics=None):
    """Create the checkpoint folder (needed for saving the adapter) without
    persisting the base model, optimizer, or scheduler state.

    Also keeps ``state.best_metric`` / ``state.best_model_checkpoint`` up to
    date and rotates old checkpoints, mirroring the stock Trainer behavior.
    """
    folder_name = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
    run_dir = self._get_output_dir(trial=trial)
    ckpt_dir = os.path.join(run_dir, folder_name)

    # Determine whether this checkpoint is the new best one.
    if metrics is not None and self.args.metric_for_best_model is not None:
        metric_key = self.args.metric_for_best_model
        if not metric_key.startswith("eval_"):
            metric_key = f"eval_{metric_key}"
        current_value = metrics[metric_key]

        is_improvement = np.greater if self.args.greater_is_better else np.less
        no_best_yet = (
            self.state.best_metric is None
            or self.state.best_model_checkpoint is None
        )
        if no_best_yet or is_improvement(current_value, self.state.best_metric):
            self.state.best_metric = current_value
            self.state.best_model_checkpoint = ckpt_dir

    os.makedirs(ckpt_dir, exist_ok=True)
    if self.args.should_save:
        self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
but create checkpoint folder (needed for saving adapter) | _save_checkpoint | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/peft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/peft_trainer.py | Apache-2.0 |
def on_epoch_end(self, args: TrainingArguments, state: TrainerState,
                 control: TrainerControl, **kwargs):
    """Save intermediate model adapters so an interrupted run can be resumed."""
    checkpoint_name = f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
    target_dir = os.path.join(args.output_dir, checkpoint_name)
    self._save(kwargs['model'], target_dir)
def _get_collator_with_removed_columns(
    self, data_collator: Callable, description: Optional[str] = None
) -> Callable:
    """Wrap the data collator in a callable removing unused columns.

    Returns the collator untouched when ``args.remove_unused_columns`` is off.
    """
    if not self.args.remove_unused_columns:
        return data_collator

    # Make sure the model-signature column names have been computed.
    self._set_signature_columns_if_needed()
    return RemoveColumnsCollator(
        data_collator=data_collator,
        signature_columns=self._signature_columns,
        logger=logger,
        description=description,
        model_name=self.model.__class__.__name__,
    )
def get_train_dataloader(self) -> DataLoader:
    """
    Returns the training [`~torch.utils.data.DataLoader`].

    Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
    training if necessary) otherwise.

    Subclass and override this method if you want to inject some custom behavior.
    """
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")

    dataset = self.train_dataset
    collator = self.data_collator
    if is_datasets_available() and isinstance(dataset, datasets.Dataset):
        # HF datasets: drop columns the model's forward() does not accept.
        dataset = self._remove_unused_columns(dataset, description="training")
    else:
        # Otherwise strip the extra columns inside the collator instead.
        collator = self._get_collator_with_removed_columns(collator, description="training")

    if isinstance(dataset, torch.utils.data.IterableDataset):
        # Iterable datasets cannot be sampled; shard manually when distributed.
        if self.args.world_size > 1:
            dataset = IterableDatasetShard(
                dataset,
                batch_size=self._train_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            dataset,
            batch_size=self._train_batch_size,
            collate_fn=collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    return DataLoader(
        dataset,
        batch_size=self._train_batch_size,
        sampler=self._get_train_sampler(),
        collate_fn=collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
        worker_init_fn=seed_worker,
    )
Returns the training [`~torch.utils.data.DataLoader`].
Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
| get_train_dataloader | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """
    Returns the evaluation [`~torch.utils.data.DataLoader`].

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        eval_dataset (`torch.utils.data.Dataset`, *optional*):
            If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
            by the `model.forward()` method are automatically removed. It must implement `__len__`.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")

    dataset = self.eval_dataset if eval_dataset is None else eval_dataset
    collator = self.data_collator
    if is_datasets_available() and isinstance(dataset, datasets.Dataset):
        # HF datasets: drop columns the model's forward() does not accept.
        dataset = self._remove_unused_columns(dataset, description="evaluation")
    else:
        collator = self._get_collator_with_removed_columns(collator, description="evaluation")

    if isinstance(dataset, torch.utils.data.IterableDataset):
        # Iterable datasets cannot be sampled; shard manually when distributed.
        if self.args.world_size > 1:
            dataset = IterableDatasetShard(
                dataset,
                batch_size=self.args.per_device_eval_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    return DataLoader(
        dataset,
        sampler=self._get_eval_sampler(dataset),
        batch_size=self.args.eval_batch_size,
        collate_fn=collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
Returns the evaluation [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (`torch.utils.data.Dataset`, *optional*):
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
by the `model.forward()` method are automatically removed. It must implement `__len__`.
| get_eval_dataloader | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
    """
    Returns the test [`~torch.utils.data.DataLoader`].

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        test_dataset (`torch.utils.data.Dataset`, *optional*):
            The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
            `model.forward()` method are automatically removed. It must implement `__len__`.
    """
    collator = self.data_collator
    if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
        # HF datasets: drop columns the model's forward() does not accept.
        test_dataset = self._remove_unused_columns(test_dataset, description="test")
    else:
        collator = self._get_collator_with_removed_columns(collator, description="test")

    if isinstance(test_dataset, torch.utils.data.IterableDataset):
        # Iterable datasets cannot be sampled; shard manually when distributed.
        if self.args.world_size > 1:
            test_dataset = IterableDatasetShard(
                test_dataset,
                batch_size=self.args.eval_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            test_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    # We use the same batch_size as for eval.
    return DataLoader(
        test_dataset,
        sampler=self._get_eval_sampler(test_dataset),
        batch_size=self.args.eval_batch_size,
        collate_fn=collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
Returns the test [`~torch.utils.data.DataLoader`].
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (`torch.utils.data.Dataset`, *optional*):
The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. It must implement `__len__`.
| get_test_dataloader | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def create_optimizer_and_scheduler(self, num_training_steps: int):
    """
    Setup the optimizer and the learning rate scheduler.

    We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
    Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
    `create_scheduler`) in a subclass.
    """
    self.create_optimizer()
    optimizer = self.optimizer
    if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
        # smp >= 1.10 with fp16 wraps the optimizer; the scheduler needs the inner one.
        optimizer = optimizer.optimizer
    self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
`create_scheduler`) in a subclass.
| create_optimizer_and_scheduler | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def create_optimizer(self):
    """
    Setup the optimizer.

    We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
    Trainer's init through `optimizers`, or subclass and override this method in a subclass.
    """
    opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model

    if self.optimizer is None:
        # Apply weight decay to everything except biases and LayerNorm weights.
        decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
        decay_parameters = [name for name in decay_parameters if "bias" not in name]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
                ],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [
                    p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                ],
                "weight_decay": 0.0,
            },
        ]

        optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)

        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # fairscale's OSS shards optimizer state across data-parallel workers.
            self.optimizer = OSS(
                params=optimizer_grouped_parameters,
                optim=optimizer_cls,
                **optimizer_kwargs,
            )
        else:
            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
            if optimizer_cls.__name__ == "Adam8bit":
                import bitsandbytes

                manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                # Embeddings are kept in fp32 by bitsandbytes; count how many
                # parameters are excluded from 8-bit optimization.
                skipped = 0
                for module in opt_model.modules():
                    if isinstance(module, nn.Embedding):
                        skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                        # Use the module logger instead of bare print() for
                        # consistency with the logger.debug call below.
                        logger.info(f"skipped {module}: {skipped/2**20}M params")
                        manager.register_module_override(module, "weight", {"optim_bits": 32})
                        logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                logger.info(f"skipped: {skipped/2**20}M params")

    if is_sagemaker_mp_enabled():
        self.optimizer = smp.DistributedOptimizer(self.optimizer)

    return self.optimizer
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method in a subclass.
| create_optimizer | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
    """
    Returns the optimizer class and optimizer parameters based on the training arguments.

    Args:
        args (`transformers.training_args.TrainingArguments`):
            The training arguments for the training session.

    Raises:
        ValueError: if `args.optim` names an unsupported optimizer, or the
            backend package for the chosen optimizer is not installed.
    """
    # Parse args.optim_args: "key1=value1,key2=value2" -> {"key1": "value1", ...}
    optim_args = {}
    if args.optim_args:
        for mapping in args.optim_args.replace(" ", "").split(","):
            if not mapping:
                # Tolerate stray/trailing commas instead of crashing on "".split("=").
                continue
            # maxsplit=1 so values that themselves contain '=' stay intact.
            key, value = mapping.split("=", 1)
            optim_args[key] = value

    optimizer_kwargs = {"lr": args.learning_rate}

    # Shared hyper-parameters for every Adam variant below.
    adam_kwargs = {
        "betas": (args.adam_beta1, args.adam_beta2),
        "eps": args.adam_epsilon,
    }
    if args.optim == OptimizerNames.ADAFACTOR:
        optimizer_cls = Adafactor
        optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
    elif args.optim == OptimizerNames.ADAMW_HF:
        from transformers.optimization import AdamW

        optimizer_cls = AdamW
        optimizer_kwargs.update(adam_kwargs)
    elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
        from torch.optim import AdamW

        optimizer_cls = AdamW
        optimizer_kwargs.update(adam_kwargs)
        if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
            optimizer_kwargs.update({"fused": True})
    elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
        try:
            from torch_xla.amp.syncfree import AdamW

            optimizer_cls = AdamW
            optimizer_kwargs.update(adam_kwargs)
        except ImportError:
            raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
    elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
        try:
            from apex.optimizers import FusedAdam

            optimizer_cls = FusedAdam
            optimizer_kwargs.update(adam_kwargs)
        except ImportError:
            raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
    elif args.optim == OptimizerNames.ADAMW_BNB:
        try:
            from bitsandbytes.optim import Adam8bit

            optimizer_cls = Adam8bit
            optimizer_kwargs.update(adam_kwargs)
        except ImportError:
            raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!")
    elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
        try:
            from torchdistx.optimizers import AnyPrecisionAdamW

            optimizer_cls = AnyPrecisionAdamW
            optimizer_kwargs.update(adam_kwargs)

            # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
            optimizer_kwargs.update(
                {
                    "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
                    "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
                    "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
                    "compensation_buffer_dtype": getattr(
                        torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
                    ),
                }
            )
        except ImportError:
            raise ValueError("Please install https://github.com/pytorch/torchdistx")
    elif args.optim == OptimizerNames.SGD:
        optimizer_cls = torch.optim.SGD
    elif args.optim == OptimizerNames.ADAGRAD:
        optimizer_cls = torch.optim.Adagrad
    else:
        raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
    return optimizer_cls, optimizer_kwargs
Returns the optimizer class and optimizer parameters based on the training arguments.
Args:
args (`transformers.training_args.TrainingArguments`):
The training arguments for the training session.
| get_optimizer_cls_and_kwargs | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
    """
    Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
    passed as an argument.

    Args:
        num_training_steps (int): The number of training steps to do.
    """
    ############
    # NOTE(review): deliberate tweak (flagged by the original author with these
    # marker lines) — the scheduler horizon is stretched to 3x the requested
    # step count, presumably so the learning rate decays more slowly across
    # repeated RAFT training rounds. Confirm the factor against the training
    # recipe before changing it.
    num_training_steps *= 3
    ############
    if self.lr_scheduler is None:
        # Only create once; subsequent calls reuse the existing scheduler.
        self.lr_scheduler = get_scheduler(
            self.args.lr_scheduler_type,
            optimizer=self.optimizer if optimizer is None else optimizer,
            num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
            num_training_steps=num_training_steps,
        )
    return self.lr_scheduler
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
| create_scheduler | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def num_examples(self, dataloader: DataLoader) -> int:
    """
    Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
    dataloader.dataset does not exist or has no length, estimates as best it can
    """
    try:
        inner = dataloader.dataset
        if isinstance(inner, IterableDatasetShard):
            # The shard wraps the real dataset one level deeper.
            return len(inner.dataset)
        return len(inner)
    except (NameError, AttributeError, TypeError):
        # No dataset or no length: estimate from the batch count.
        return len(dataloader) * self.args.per_device_train_batch_size
Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
dataloader.dataset does not exist or has no length, estimates as best it can
| num_examples | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def train(
    self,
    resume_from_checkpoint: Optional[Union[str, bool]] = None,
    trial: Union["optuna.Trial", Dict[str, Any]] = None,
    ignore_keys_for_eval: Optional[List[str]] = None,
    is_first_time = False,
    **kwargs,
):
    """
    Main training entry point.

    Args:
        resume_from_checkpoint (`str` or `bool`, *optional*):
            If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
            `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
            of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
        trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
            The trial run or the hyperparameter dictionary for hyperparameter search.
        ignore_keys_for_eval (`List[str]`, *optional*)
            A list of keys in the output of your model (if it is a dictionary) that should be ignored when
            gathering predictions for evaluation during the training.
        is_first_time (`bool`, *optional*, defaults to `False`):
            RAFT-specific switch: when `True`, dispatches to
            `self._inner_training_loop` (full setup pass); otherwise to
            `self._one_train` on the already-prepared trainer state.
        kwargs:
            Additional keyword arguments used to hide deprecated arguments
    """
    if resume_from_checkpoint is False:
        resume_from_checkpoint = None

    # memory metrics - must set up as early as possible
    self._memory_tracker.start()

    args = self.args

    #self.is_in_train = True

    # do_train is not a reliable argument, as it might not be set and .train() still called, so
    # the following is a workaround:
    if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
        self._move_model_to_device(self.model, args.device)

    # `model_path` is the legacy name for `resume_from_checkpoint`.
    if "model_path" in kwargs:
        resume_from_checkpoint = kwargs.pop("model_path")
        warnings.warn(
            "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
            "instead.",
            FutureWarning,
        )
    if len(kwargs) > 0:
        raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
    # This might change the seed so needs to run first.
    self._hp_search_setup(trial)
    self._train_batch_size = self.args.train_batch_size

    # Model re-init
    model_reloaded = False
    if self.model_init is not None:
        # Seed must be set before instantiating the model when using model_init.
        enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
        self.model = self.call_model_init(trial)
        model_reloaded = True
        # Reinitializes optimizer and scheduler
        self.optimizer, self.lr_scheduler = None, None

    # Load potential model checkpoint
    if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
        resume_from_checkpoint = get_last_checkpoint(args.output_dir)
        if resume_from_checkpoint is None:
            raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")

    if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None:
        self._load_from_checkpoint(resume_from_checkpoint)

    # If model was re-initialized, put it on the right device and update self.model_wrapped
    if model_reloaded:
        if self.place_model_on_device:
            self._move_model_to_device(self.model, args.device)
        self.model_wrapped = self.model

    # NOTE(review): `find_executable_batch_size` presumably retries the wrapped
    # loop with a smaller batch size on OOM when `args.auto_find_batch_size`
    # is set — confirm against the transformers implementation.
    if is_first_time:
        inner_training_loop1 = find_executable_batch_size(
            self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
        )
        return inner_training_loop1(
            args=args,
            resume_from_checkpoint=resume_from_checkpoint,
            trial=trial,
            ignore_keys_for_eval=ignore_keys_for_eval,
        )
    else:
        inner_training_loop2 = find_executable_batch_size(
            self._one_train, self._train_batch_size, args.auto_find_batch_size
        )
        return inner_training_loop2(
            args=args,
            resume_from_checkpoint=resume_from_checkpoint,
            trial=trial,
            ignore_keys_for_eval=ignore_keys_for_eval,
        )
Main training entry point.
Args:
resume_from_checkpoint (`str` or `bool`, *optional*):
If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
`bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
The trial run or the hyperparameter dictionary for hyperparameter search.
ignore_keys_for_eval (`List[str]`, *optional*)
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions for evaluation during the training.
kwargs:
Additional keyword arguments used to hide deprecated arguments
| train | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _inner_training_loop(
    self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
):
    '''
    0 This function serves to train one time
    1 Update the self.train_dataset before calling this function

    NOTE(review): this is a heavily trimmed copy of the transformers inner
    loop. It only performs SETUP (dataloader, step accounting, optimizer,
    scheduler, model wrapping) and then returns True early — the actual
    optimization steps appear to happen elsewhere (presumably `_one_train`);
    confirm before relying on this returning training results.
    '''
    # 1 Get dataloader
    self._train_batch_size = batch_size
    # Data loader and number of training steps
    train_dataloader = self.get_train_dataloader()
    # Effective batch size across gradient accumulation and all processes.
    total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
    len_dataloader = None
    if has_length(train_dataloader):
        len_dataloader = len(train_dataloader)
        num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
        num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
        num_examples = self.num_examples(train_dataloader)
        if args.max_steps > 0:
            # max_steps takes priority; derive the epoch count from it.
            max_steps = args.max_steps
            num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                args.max_steps % num_update_steps_per_epoch > 0
            )
            # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
            # the best we can do.
            num_train_samples = args.max_steps * total_train_batch_size
        else:
            max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
            num_train_epochs = math.ceil(args.num_train_epochs)
            num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
    elif args.max_steps > 0:  # Rely on max_steps when dataloader does not have a working size
        max_steps = args.max_steps
        # Setting a very large number of epochs so we go as many times as necessary over the iterator.
        num_train_epochs = sys.maxsize
        num_update_steps_per_epoch = max_steps
        num_examples = total_train_batch_size * args.max_steps
        num_train_samples = args.max_steps * total_train_batch_size
    else:
        raise ValueError(
            "args.max_steps must be set to a positive value if dataloader does not have a length, was"
            f" {args.max_steps}"
        )
    if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
        if self.args.n_gpu > 1:
            # nn.DataParallel(model) replicates the model, creating new variables and module
            # references registered here no longer work on other gpus, breaking the module
            raise ValueError(
                "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
                " (torch.distributed.launch)."
            )
        else:
            debug_overflow = DebugUnderflowOverflow(self.model)  # noqa
    # Defer optimizer creation when the wrapper (sharded DDP / SageMaker MP /
    # FSDP) must be applied to the model first.
    delay_optimizer_creation = (
        self.sharded_ddp is not None
        and self.sharded_ddp != ShardedDDPOption.SIMPLE
        or is_sagemaker_mp_enabled()
        or self.fsdp is not None
    )
    if args.deepspeed:
        # DeepSpeed builds model, optimizer and scheduler together.
        deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
            self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
        )
        self.model = deepspeed_engine.module
        self.model_wrapped = deepspeed_engine
        self.deepspeed = deepspeed_engine
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        #print("I just create a optimizer here!") # called
    elif not delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
    self.state = TrainerState()
    self.state.is_hyper_param_search = trial is not None
    # Activate gradient checkpointing if needed
    if args.gradient_checkpointing:
        self.model.gradient_checkpointing_enable()
    # Keep a handle to the wrapped model on self so later calls (outside this
    # function) can reach it — the upstream code kept it in a local `model`.
    #model = self._wrap_model(self.model_wrapped)
    self.tmp_model = self._wrap_model(self.model_wrapped)
    #if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
    #    self._load_from_checkpoint(resume_from_checkpoint, model)
    # for the rest of this function `model` is the outside model, whether it was wrapped or not
    if self.tmp_model is not self.model:
        self.model_wrapped = self.tmp_model
    if delay_optimizer_creation:
        # NOTE(review): leftover debug print; per its own comment this branch
        # is not expected to run in the supported configurations.
        print("I create here!") # not called
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
    # Deliberate early exit: setup complete. Everything below this return is
    # intentionally disabled.
    return True
    # Check if saved optimizer or scheduler states exist
    #self._load_optimizer_and_scheduler(resume_from_checkpoint)
    # important: at this point:
    # self.model is the Transformers Model
    # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
0 This function serves to train one time
1 Update the self.train_dataset before calling this function
| _inner_training_loop | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _load_optimizer_and_scheduler(self, checkpoint):
    """If optimizer and scheduler states exist, load them.

    Args:
        checkpoint: path to a checkpoint directory, or None to skip loading.

    Handles three environments differently: DeepSpeed (no-op here), TPU
    (states staged on CPU, then moved to device), and SageMaker model
    parallel (loaded lazily via a post-step hook).
    """
    if checkpoint is None:
        return

    if self.deepspeed:
        # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
        return

    # SageMaker MP shards the optimizer state across files named "<name>_*".
    checkpoint_file_exists = (
        glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
        if is_sagemaker_mp_enabled()
        else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
    )
    if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
        # Load in optimizer and scheduler states
        if is_torch_tpu_available():
            # On TPU we have to take some extra precautions to properly load the states on the right device.
            optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
            with warnings.catch_warnings(record=True) as caught_warnings:
                lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
            reissue_pt_warnings(caught_warnings)

            xm.send_cpu_data_to_device(optimizer_state, self.args.device)
            xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

            self.optimizer.load_state_dict(optimizer_state)
            self.lr_scheduler.load_state_dict(lr_scheduler_state)
        else:
            map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
            if is_sagemaker_mp_enabled():
                if os.path.isfile(os.path.join(checkpoint, "user_content.pt")):
                    # Optimizer checkpoint was saved with smp >= 1.10
                    def opt_load_hook(mod, opt):
                        opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))

                else:
                    # Optimizer checkpoint was saved with smp < 1.10
                    def opt_load_hook(mod, opt):
                        if IS_SAGEMAKER_MP_POST_1_10:
                            # Newer runtime reading an older checkpoint needs back_compat.
                            opt.load_state_dict(
                                smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)
                            )
                        else:
                            opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))

                # Defer the actual load until after the first optimizer step.
                self.model_wrapped.register_post_step_hook(opt_load_hook)
            else:
                self.optimizer.load_state_dict(
                    torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
                )
            with warnings.catch_warnings(record=True) as caught_warnings:
                self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
            reissue_pt_warnings(caught_warnings)
            if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
                self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
def hyperparameter_search(
    self,
    hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
    compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
    n_trials: int = 20,
    direction: str = "minimize",
    backend: Optional[Union["str", HPSearchBackend]] = None,
    hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
    **kwargs,
) -> BestRun:
    """
    Launch a hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined
    by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
    the sum of all metrics otherwise.
    <Tip warning={true}>
    To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
    reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
    subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
    optimizer/scheduler.
    </Tip>
    Args:
        hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
            A function that defines the hyperparameter search space. Will default to
            [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
            [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
        compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
            A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
            method. Will default to [`~trainer_utils.default_compute_objective`].
        n_trials (`int`, *optional*, defaults to 20):
            The number of trial runs to test.
        direction (`str`, *optional*, defaults to `"minimize"`):
            Whether to optimize greater or lower objects. Can be `"minimize"` or `"maximize"`, you should pick
            `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
        backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
            The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
            on which one is installed. If all are installed, will default to optuna.
        hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
            A function that defines the trial/run name. Will default to None.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
            information see:
            - the documentation of
              [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
            - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
            - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
    Returns:
        [`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in
        `run_summary` attribute for Ray backend.
    """
    import importlib.util  # stdlib probe used to check that the chosen backend is installed

    if backend is None:
        backend = default_hp_search_backend()
    if backend is None:
        raise RuntimeError(
            "At least one of optuna or ray should be installed. "
            "To install optuna run `pip install optuna`. "
            "To install ray run `pip install ray[tune]`. "
            "To install sigopt run `pip install sigopt`."
        )
    backend = HPSearchBackend(backend)
    # BUG FIX: these checks previously raised unconditionally for every backend, which made
    # `hyperparameter_search` unusable. Only raise when the backend's package is actually missing.
    if backend == HPSearchBackend.OPTUNA and importlib.util.find_spec("optuna") is None:
        raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
    if backend == HPSearchBackend.RAY and importlib.util.find_spec("ray") is None:
        raise RuntimeError(
            "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
        )
    if backend == HPSearchBackend.SIGOPT and importlib.util.find_spec("sigopt") is None:
        raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
    if backend == HPSearchBackend.WANDB and importlib.util.find_spec("wandb") is None:
        raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.")
    self.hp_search_backend = backend
    if self.model_init is None:
        raise RuntimeError(
            "To use hyperparameter search, you need to pass your model through a model_init function."
        )
    try:
        # Older transformers exposes one runner function per backend at module level.
        backend_dict = {
            HPSearchBackend.OPTUNA: run_hp_search_optuna,
            HPSearchBackend.RAY: run_hp_search_ray,
            HPSearchBackend.SIGOPT: run_hp_search_sigopt,
            HPSearchBackend.WANDB: run_hp_search_wandb,
        }
        backend_run = backend_dict[backend]
    except NameError:
        # Newer transformers replaces the per-backend functions with backend objects; evaluating
        # the bare name below re-raises NameError if neither API is available.
        ALL_HYPERPARAMETER_SEARCH_BACKENDS
        backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]()
        backend_run = backend_obj.run
    try:
        from transformers.trainer_utils import default_hp_space
    except ImportError:
        # NOTE(review): `backend_obj` only exists when the NameError fallback above ran —
        # pre-existing behavior, kept as-is.
        default_hp_space = backend_obj.default_hp_space
    self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
    self.hp_name = hp_name
    self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
    best_run = backend_run(self, n_trials, direction, **kwargs)
    # Clear the backend marker so subsequent `train` calls run normally.
    self.hp_search_backend = None
    return best_run
Launch a hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined
by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
the sum of all metrics otherwise.
<Tip warning={true}>
To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
optimizer/scheduler.
</Tip>
Args:
hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
A function that defines the hyperparameter search space. Will default to
[`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
[`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
method. Will default to [`~trainer_utils.default_compute_objective`].
n_trials (`int`, *optional*, defaults to 20):
The number of trial runs to test.
direction (`str`, *optional*, defaults to `"minimize"`):
Whether to optimize greater or lower objects. Can be `"minimize"` or `"maximize"`, you should pick
`"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
on which one is installed. If all are installed, will default to optuna.
hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
A function that defines the trial/run name. Will default to None.
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
information see:
- the documentation of
[optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
- the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
- the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
Returns:
[`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in
`run_summary` attribute for Ray backend.
| hyperparameter_search | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def log(self, logs: Dict[str, float]) -> None:
    """
    Log `logs` on the various objects watching training.
    Subclass and override this method to inject custom behavior.
    Args:
        logs (`Dict[str, float]`):
            The values to log.
    """
    # Stamp the (rounded) epoch onto the logs once training has started.
    if self.state.epoch is not None:
        logs["epoch"] = round(self.state.epoch, 2)
    # History entries additionally carry the global step; the callback sees `logs` as-is.
    history_entry = dict(logs)
    history_entry["step"] = self.state.global_step
    self.state.log_history.append(history_entry)
    self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
Log `logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (`Dict[str, float]`):
The values to log.
| log | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
    """
    Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
    """
    if isinstance(data, Mapping):
        # Rebuild the mapping (preserving its concrete type) with each value prepared.
        return type(data)({key: self._prepare_input(value) for key, value in data.items()})
    if isinstance(data, (tuple, list)):
        return type(data)(self._prepare_input(item) for item in data)
    if not isinstance(data, torch.Tensor):
        # Non-tensor leaves (ints, strings, ...) pass through untouched.
        return data
    to_kwargs = {"device": self.args.device}
    if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)):
        # NLP models inputs are int/uint and those get adjusted to the right dtype of the
        # embedding. Other models such as wav2vec2's inputs are already float and thus
        # may need special handling to match the dtypes of the model
        to_kwargs["dtype"] = self.args.hf_deepspeed_config.dtype()
    return data.to(**to_kwargs)
Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
| _prepare_input | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
    """
    Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
    handling potential state.
    """
    prepared = self._prepare_input(inputs)
    if len(prepared) == 0:
        raise ValueError(
            "The batch received was empty, your model won't be able to train on it. Double-check that your "
            f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}."
        )
    # Thread the cached "mems" state through when the model uses `past_index`.
    if self.args.past_index >= 0 and self._past is not None:
        prepared["mems"] = self._past
    return prepared
Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
| _prepare_inputs | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True):
    """
    A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
    arguments, depending on the situation.
    """
    if not (self.use_cuda_amp or self.use_cpu_amp):
        # No mixed precision requested: hand back a do-nothing context manager.
        return contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress()
    if not is_torch_greater_or_equal_than_1_10:
        # Old torch: `autocast` accepts no dtype/cache_enabled arguments.
        return torch.cuda.amp.autocast()
    if self.use_cpu_amp:
        return torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
    return torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
arguments, depending on the situation.
| autocast_smart_context_manager | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
    """
    Perform a training step on a batch of inputs.
    Subclass and override to inject custom behavior.
    Args:
        model (`nn.Module`):
            The model to train.
        inputs (`Dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model.
            The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
            argument `labels`. Check your model's documentation for all accepted arguments.
    Return:
        `torch.Tensor`: The tensor with training loss on this batch.
    """
    def _run_backward(current_loss):
        # Dispatch the backward pass to whichever mixed-precision/engine path is active.
        if self.do_grad_scaling:
            self.scaler.scale(current_loss).backward()
        elif self.use_apex:
            with amp.scale_loss(current_loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # deepspeed applies loss scaling internally and returns the resulting loss.
            return self.deepspeed.backward(current_loss)
        else:
            current_loss.backward()
        return current_loss

    model.train()
    inputs = self._prepare_inputs(inputs)
    if is_sagemaker_mp_enabled():
        # SageMaker model parallel runs forward+backward inside the smp helper.
        loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
        return loss_mb.reduce_mean().detach().to(self.args.device)
    with self.compute_loss_context_manager():
        loss = self.compute_loss(model, inputs)
    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training
    if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
        # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
        loss = loss / self.args.gradient_accumulation_steps
    loss = _run_backward(loss)
    return loss.detach()
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to train.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
Return:
`torch.Tensor`: The tensor with training loss on this batch.
| training_step | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def compute_loss(self, model, inputs, return_outputs=False):
    """
    How the loss is computed by Trainer. By default, all models return the loss in the first element.
    Subclass and override for custom behavior.
    """
    # Pull labels out only when we are going to smooth them ourselves.
    smoothing = self.label_smoother is not None and "labels" in inputs
    labels = inputs.pop("labels") if smoothing else None
    outputs = model(**inputs)
    # Save past state if it exists
    # TODO: this needs to be fixed and made cleaner later.
    if self.args.past_index >= 0:
        self._past = outputs[self.args.past_index]
    if labels is not None:
        # Causal LMs need the labels shifted by one position before smoothing.
        if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
            loss = self.label_smoother(outputs, labels, shift_labels=True)
        else:
            loss = self.label_smoother(outputs, labels)
        return (loss, outputs) if return_outputs else loss
    if isinstance(outputs, dict) and "loss" not in outputs:
        raise ValueError(
            "The model did not return a loss from the inputs, only the following keys: "
            f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
        )
    # We don't use .loss here since the model may return tuples instead of ModelOutput.
    loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
    return (loss, outputs) if return_outputs else loss
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
| compute_loss | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def is_world_process_zero(self) -> bool:
    """
    Whether or not this process is the global main process (when training in a distributed fashion on several
    machines, this is only going to be `True` for one process).
    """
    # Under SageMaker ModelParallel, `process_index` is the dp_process_index rather than the
    # global rank, so ask smp for the true global rank instead.
    if is_sagemaker_mp_enabled():
        return smp.rank() == 0
    return self.args.process_index == 0
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be `True` for one process).
| is_world_process_zero | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
    """
    Will save the model, so you can reload it using `from_pretrained()`.
    Will only save from the main process.

    Args:
        output_dir (`str`, *optional*):
            Directory to save to; falls back to `self.args.output_dir` when omitted.
        _internal_call (`bool`, *optional*, defaults to `False`):
            Set by internal callers; suppresses the automatic push to the Hub at the end.
    """
    if output_dir is None:
        output_dir = self.args.output_dir
    if is_torch_tpu_available():
        # TPU saving needs device-specific synchronization; delegated to a helper.
        self._save_tpu(output_dir)
    elif is_sagemaker_mp_enabled():
        # Calling the state_dict needs to be done on the wrapped model and on all processes.
        os.makedirs(output_dir, exist_ok=True)
        state_dict = self.model_wrapped.state_dict()
        if self.args.should_save:
            self._save(output_dir, state_dict=state_dict)
        if IS_SAGEMAKER_MP_POST_1_10:
            # 'user_content.pt' indicates model state_dict saved with smp >= 1.10
            Path(os.path.join(output_dir, "user_content.pt")).touch()
    elif (
        ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp
        or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
        or self.fsdp is not None
    ):
        # Sharded DDP / FSDP: every rank participates in building the state dict;
        # only the designated rank writes it to disk.
        state_dict = self.model.state_dict()
        if self.args.should_save:
            self._save(output_dir, state_dict=state_dict)
    elif self.deepspeed:
        # this takes care of everything as long as we aren't under zero3
        if self.args.should_save:
            self._save(output_dir)
        if is_deepspeed_zero3_enabled():
            # It's too complicated to try to override different places where the weights dump gets
            # saved, so since under zero3 the file is bogus, simply delete it. The user should
            # either user deepspeed checkpoint to resume or to recover full weights use
            # zero_to_fp32.py stored in the checkpoint.
            if self.args.should_save:
                file = os.path.join(output_dir, WEIGHTS_NAME)
                if os.path.isfile(file):
                    # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
                    os.remove(file)
            # now save the real model if stage3_gather_16bit_weights_on_model_save=True
            # if false it will not be saved.
            # This must be called on all ranks
            if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
                logger.warning(
                    "deepspeed.save_16bit_model didn't save the model, since"
                    " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
                    " zero_to_fp32.py to recover weights"
                )
                self.deepspeed.save_checkpoint(output_dir)
    elif self.args.should_save:
        self._save(output_dir)
    # Push to the Hub when `save_model` is called by the user.
    if self.args.push_to_hub and not _internal_call:
        self.push_to_hub(commit_message="Model save")
Will save the model, so you can reload it using `from_pretrained()`.
Will only save from the main process.
| save_model | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def evaluate(
    self,
    eval_dataset: Optional[Dataset] = None,
    ignore_keys: Optional[List[str]] = None,
    metric_key_prefix: str = "eval",
) -> Dict[str, float]:
    """
    Run evaluation and return metrics.
    The calling script is responsible for providing a method to compute metrics, as they are task-dependent
    (pass it to the init `compute_metrics` argument). Subclass and override to inject custom behavior.
    Args:
        eval_dataset (`Dataset`, *optional*):
            Overrides `self.eval_dataset` when given; must implement `__len__`.
        ignore_keys (`List[str]`, *optional*):
            Keys of the model output dict to ignore when gathering predictions.
        metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
            Prefix prepended to every returned metric name (e.g. `"eval_bleu"`).
    Returns:
        A dictionary containing the evaluation loss and any metrics computed from the predictions,
        plus the current epoch number from the training state.
    """
    # memory metrics - must set up as early as possible
    self._memory_tracker.start()
    dataloader = self.get_eval_dataloader(eval_dataset)
    start_time = time.time()
    loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
    output = loop(
        dataloader,
        description="Evaluation",
        # No point gathering the predictions if there are no metrics, otherwise we defer to
        # self.args.prediction_loss_only
        prediction_loss_only=True if self.compute_metrics is None else None,
        ignore_keys=ignore_keys,
        metric_key_prefix=metric_key_prefix,
    )
    total_batch_size = self.args.eval_batch_size * self.args.world_size
    jit_key = f"{metric_key_prefix}_jit_compilation_time"
    if jit_key in output.metrics:
        # Exclude one-off JIT compilation time from throughput numbers.
        start_time += output.metrics[jit_key]
    output.metrics.update(
        speed_metrics(
            metric_key_prefix,
            start_time,
            num_samples=output.num_samples,
            num_steps=math.ceil(output.num_samples / total_batch_size),
        )
    )
    self.log(output.metrics)
    if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
        # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
        xm.master_print(met.metrics_report())
    self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
    self._memory_tracker.stop_and_update_metrics(output.metrics)
    return output.metrics
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init `compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (`Dataset`, *optional*):
Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
| evaluate | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def predict(
    self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
    """
    Run prediction and return predictions and potential metrics.
    Depending on the dataset and your use case, your test dataset may contain labels; in that case this method
    also returns metrics, like `evaluate()`.
    Args:
        test_dataset (`Dataset`):
            Dataset to run the predictions on; must implement `__len__`.
        ignore_keys (`List[str]`, *optional*):
            Keys of the model output dict to ignore when gathering predictions.
        metric_key_prefix (`str`, *optional*, defaults to `"test"`):
            Prefix prepended to every returned metric name (e.g. `"test_bleu"`).
    <Tip>
    If predictions or labels have different sequence lengths (e.g. dynamic padding in token classification),
    predictions are right-padded with -100 to allow concatenation into one array.
    </Tip>
    Returns: *NamedTuple* with keys `predictions` (`np.ndarray`), `label_ids` (`np.ndarray`, *optional*),
    and `metrics` (`Dict[str, float]`, *optional*, present when the dataset contained labels).
    """
    # memory metrics - must set up as early as possible
    self._memory_tracker.start()
    dataloader = self.get_test_dataloader(test_dataset)
    start_time = time.time()
    loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
    output = loop(
        dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
    )
    total_batch_size = self.args.eval_batch_size * self.args.world_size
    jit_key = f"{metric_key_prefix}_jit_compilation_time"
    if jit_key in output.metrics:
        # Exclude one-off JIT compilation time from throughput numbers.
        start_time += output.metrics[jit_key]
    output.metrics.update(
        speed_metrics(
            metric_key_prefix,
            start_time,
            num_samples=output.num_samples,
            num_steps=math.ceil(output.num_samples / total_batch_size),
        )
    )
    self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
    self._memory_tracker.stop_and_update_metrics(output.metrics)
    return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in `evaluate()`.
Args:
test_dataset (`Dataset`):
Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the
`model.forward()` method are automatically removed. Has to implement the method `__len__`
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"test"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"test_bleu" if the prefix is "test" (default)
<Tip>
If your predictions or labels have different sequence length (for instance because you're doing dynamic padding
in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
one array. The padding index is -100.
</Tip>
Returns: *NamedTuple* A namedtuple with the following keys:
- predictions (`np.ndarray`): The predictions on `test_dataset`.
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
labels).
| predict | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def evaluation_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: Optional[bool] = None,
    ignore_keys: Optional[List[str]] = None,
    metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
    """
    Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
    Works both with or without labels.

    Args:
        dataloader (`DataLoader`):
            Batches to run the model on.
        description (`str`):
            Human-readable label used only for logging (e.g. "Evaluation" or "Prediction").
        prediction_loss_only (`bool`, *optional*):
            When `True`, only the loss is accumulated; falls back to `self.args.prediction_loss_only`.
        ignore_keys (`List[str]`, *optional*):
            Keys of the model output dict to drop before gathering predictions.
        metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
            Prefix prepended to every metric name in the returned metrics dict.

    Returns:
        `EvalLoopOutput` with the gathered predictions, label ids, metrics, and sample count.
    """
    args = self.args
    prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
    # if eval is called w/o train init deepspeed here
    if args.deepspeed and not self.deepspeed:
        # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
        # from the checkpoint eventually
        deepspeed_engine, _, _ = deepspeed_init(
            self, num_training_steps=0, resume_from_checkpoint=None, inference=True
        )
        self.model = deepspeed_engine.module
        self.model_wrapped = deepspeed_engine
        self.deepspeed = deepspeed_engine
    model = self._wrap_model(self.model, training=False, dataloader=dataloader)
    # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
    # while ``train`` is running, cast it to the right dtype first and then put on device
    if not self.is_in_train:
        if args.fp16_full_eval:
            model = model.to(dtype=torch.float16, device=args.device)
        elif args.bf16_full_eval:
            model = model.to(dtype=torch.bfloat16, device=args.device)
    batch_size = self.args.eval_batch_size
    logger.info(f"***** Running {description} *****")
    if has_length(dataloader):
        logger.info(f" Num examples = {self.num_examples(dataloader)}")
    else:
        logger.info(" Num examples: Unknown")
    logger.info(f" Batch size = {batch_size}")
    model.eval()
    self.callback_handler.eval_dataloader = dataloader
    # Do this before wrapping.
    eval_dataset = getattr(dataloader, "dataset", None)
    if is_torch_tpu_available():
        dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
    if args.past_index >= 0:
        self._past = None
    # Initialize containers
    # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
    losses_host = None
    preds_host = None
    labels_host = None
    inputs_host = None
    # losses/preds/labels on CPU (final containers)
    all_losses = None
    all_preds = None
    all_labels = None
    all_inputs = None
    # Will be useful when we have an iterable dataset so don't know its length.
    observed_num_examples = 0
    # Main evaluation loop
    for step, inputs in enumerate(dataloader):
        # Update the observed num examples
        observed_batch_size = find_batch_size(inputs)
        if observed_batch_size is not None:
            observed_num_examples += observed_batch_size
            # For batch samplers, batch_size is not known by the dataloader in advance.
            if batch_size is None:
                batch_size = observed_batch_size
        # Prediction step
        loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
        inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
        if is_torch_tpu_available():
            xm.mark_step()
        # Update containers on host
        if loss is not None:
            # Repeat the scalar loss so gathering yields one entry per example in the batch.
            losses = self._nested_gather(loss.repeat(batch_size))
            losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
        if labels is not None:
            labels = self._pad_across_processes(labels)
            labels = self._nested_gather(labels)
            labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
        if inputs_decode is not None:
            inputs_decode = self._pad_across_processes(inputs_decode)
            inputs_decode = self._nested_gather(inputs_decode)
            inputs_host = (
                inputs_decode
                if inputs_host is None
                else nested_concat(inputs_host, inputs_decode, padding_index=-100)
            )
        if logits is not None:
            logits = self._pad_across_processes(logits)
            logits = self._nested_gather(logits)
            if self.preprocess_logits_for_metrics is not None:
                logits = self.preprocess_logits_for_metrics(logits, labels)
            preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
        self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
        # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
        if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
            if losses_host is not None:
                losses = nested_numpify(losses_host)
                all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
            if preds_host is not None:
                logits = nested_numpify(preds_host)
                all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
            if inputs_host is not None:
                inputs_decode = nested_numpify(inputs_host)
                all_inputs = (
                    inputs_decode
                    if all_inputs is None
                    else nested_concat(all_inputs, inputs_decode, padding_index=-100)
                )
            if labels_host is not None:
                labels = nested_numpify(labels_host)
                all_labels = (
                    labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
                )
            # Set back to None to begin a new accumulation
            losses_host, preds_host, inputs_host, labels_host = None, None, None, None
    if args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of the evaluation loop
        delattr(self, "_past")
    # Gather all remaining tensors and put them back on the CPU
    if losses_host is not None:
        losses = nested_numpify(losses_host)
        all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
    if preds_host is not None:
        logits = nested_numpify(preds_host)
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if inputs_host is not None:
        inputs_decode = nested_numpify(inputs_host)
        all_inputs = (
            inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100)
        )
    if labels_host is not None:
        labels = nested_numpify(labels_host)
        all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
    # Number of samples
    if has_length(eval_dataset):
        num_samples = len(eval_dataset)
    # The instance check is weird and does not actually check for the type, but whether the dataset has the right
    # methods. Therefore we need to make sure it also has the attribute.
    elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0:
        num_samples = eval_dataset.num_examples
    else:
        if has_length(dataloader):
            num_samples = self.num_examples(dataloader)
        else:  # both len(dataloader.dataset) and len(dataloader) fail
            num_samples = observed_num_examples
    if num_samples == 0 and observed_num_examples > 0:
        num_samples = observed_num_examples
    # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
    # samplers has been rounded to a multiple of batch_size, so we truncate.
    if all_losses is not None:
        all_losses = all_losses[:num_samples]
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, num_samples)
    if all_labels is not None:
        all_labels = nested_truncate(all_labels, num_samples)
    if all_inputs is not None:
        all_inputs = nested_truncate(all_inputs, num_samples)
    # Metrics!
    if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
        if args.include_inputs_for_metrics:
            metrics = self.compute_metrics(
                EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
            )
        else:
            metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
    else:
        metrics = {}
    # To be JSON-serializable, we need to remove numpy types or zero-d tensors
    metrics = denumpify_detensorize(metrics)
    if all_losses is not None:
        metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
    if hasattr(self, "jit_compilation_time"):
        metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time
    # Prefix all keys with metric_key_prefix + '_'
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
| evaluation_loop | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _nested_gather(self, tensors, name=None):
    """
    Gather `tensors` (a tensor or a nested list/tuple of tensors) from all
    participating processes and return the concatenated result.

    Dispatches to the backend-specific gather helper (XLA mesh reduce on TPU,
    `smp_gather` under SageMaker model parallelism, `distributed_concat` for
    plain `torch.distributed`); on a single process the input is returned
    unchanged. Returns `None` when `tensors` is `None`.
    """
    if tensors is None:
        return
    if is_torch_tpu_available():
        # XLA mesh reduce needs a tag; fall back to a generic one.
        reduce_name = "nested_gather" if name is None else name
        gathered = nested_xla_mesh_reduce(tensors, reduce_name)
    elif is_sagemaker_mp_enabled():
        gathered = smp_gather(tensors)
    elif self.args.local_rank != -1:
        gathered = distributed_concat(tensors)
    else:
        # Single-process run: nothing to gather.
        gathered = tensors
    return gathered
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
| _nested_gather | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _pad_across_processes(self, tensor, pad_index=-100):
    """
    Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
    they can safely be gathered.

    Only dimension 1 (typically the sequence length) is padded, using
    `pad_index` as the fill value; 0-d and 1-d tensors are returned as-is.
    """
    # Recurse into containers, preserving their concrete type.
    if isinstance(tensor, (list, tuple)):
        return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
    elif isinstance(tensor, dict):
        return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
    elif not isinstance(tensor, torch.Tensor):
        raise TypeError(
            f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
        )
    # Nothing to pad for scalars / 1-d tensors: there is no dim 1.
    if len(tensor.shape) < 2:
        return tensor
    # Gather all sizes
    size = torch.tensor(tensor.shape, device=tensor.device)[None]
    sizes = self._nested_gather(size).cpu()
    max_size = max(s[1] for s in sizes)
    # When extracting XLA graphs for compilation, max_size is 0,
    # so use inequality to avoid errors.
    if tensor.shape[1] >= max_size:
        return tensor
    # Then pad to the maximum size
    old_size = tensor.shape
    new_size = list(old_size)
    new_size[1] = max_size
    # Fill with pad_index, then copy the original values into the leading slice.
    new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
    new_tensor[:, : old_size[1]] = tensor
    return new_tensor
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
| _pad_across_processes | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def prediction_step(
    self,
    model: nn.Module,
    inputs: Dict[str, Union[torch.Tensor, Any]],
    prediction_loss_only: bool,
    ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
    """
    Perform an evaluation step on `model` using `inputs`.

    Subclass and override to inject custom behavior.

    Args:
        model (`nn.Module`):
            The model to evaluate.
        inputs (`Dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model.

            The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
            argument `labels`. Check your model's documentation for all accepted arguments.
        prediction_loss_only (`bool`):
            Whether or not to return the loss only.
        ignore_keys (`List[str]`, *optional*):
            A list of keys in the output of your model (if it is a dictionary) that should be ignored when
            gathering predictions.

    Return:
        Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
        logits and labels (each being optional).
    """
    # True only when every configured label column is present in `inputs`.
    has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
    # For CLIP-like models capable of returning loss values.
    # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`
    # is `True` in `model.forward`.
    return_loss = inputs.get("return_loss", None)
    if return_loss is None:
        return_loss = self.can_return_loss
    # Some models compute a loss with no label columns at all (e.g. contrastive losses).
    loss_without_labels = True if len(self.label_names) == 0 and return_loss else False
    inputs = self._prepare_inputs(inputs)
    if ignore_keys is None:
        ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) if hasattr(self.model, "config") else []
    # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
    if has_labels or loss_without_labels:
        labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
        if len(labels) == 1:
            labels = labels[0]
    else:
        labels = None
    with torch.no_grad():
        if is_sagemaker_mp_enabled():
            # SageMaker model parallel: forward-only pass; outputs may come back
            # either as a dict or as a (loss, *logits) tuple.
            raw_outputs = smp_forward_only(model, inputs)
            if has_labels or loss_without_labels:
                if isinstance(raw_outputs, dict):
                    loss_mb = raw_outputs["loss"]
                    logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    loss_mb = raw_outputs[0]
                    logits_mb = raw_outputs[1:]
                loss = loss_mb.reduce_mean().detach().cpu()
                logits = smp_nested_concat(logits_mb)
            else:
                loss = None
                if isinstance(raw_outputs, dict):
                    logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                else:
                    logits_mb = raw_outputs
                logits = smp_nested_concat(logits_mb)
        else:
            if has_labels or loss_without_labels:
                # Let compute_loss run the forward pass so label smoothing etc. apply.
                with self.compute_loss_context_manager():
                    loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                loss = loss.mean().detach()
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    logits = outputs[1:]
            else:
                loss = None
                with self.compute_loss_context_manager():
                    outputs = model(**inputs)
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                else:
                    logits = outputs
                # TODO: this needs to be fixed and made cleaner later.
                if self.args.past_index >= 0:
                    self._past = outputs[self.args.past_index - 1]
    if prediction_loss_only:
        return (loss, None, None)
    logits = nested_detach(logits)
    if len(logits) == 1:
        logits = logits[0]
    return (loss, logits, labels)
Perform an evaluation step on `model` using `inputs`.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to evaluate.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (`bool`):
Whether or not to return the loss only.
ignore_keys (`Lst[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
logits and labels (each being optional).
| prediction_step | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
    """
    For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
    operations for every backward + forward pass. If using another model, either implement such a method in the
    model or subclass and override this method.

    Args:
        inputs (`Dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model.

    Returns:
        `int`: The number of floating-point operations.
    """
    # Models that cannot count their own FLOPs contribute zero.
    if not hasattr(self.model, "floating_point_ops"):
        return 0
    # Delegate to the model's own estimate.
    return self.model.floating_point_ops(inputs)
For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
operations for every backward + forward pass. If using another model, either implement such a method in the
model or subclass and override this method.
Args:
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
`int`: The number of floating-point operations.
| floating_point_ops | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def init_git_repo(self, at_init: bool = False):
    """
    Initializes a git repo in `self.args.hub_model_id`.

    Args:
        at_init (`bool`, *optional*, defaults to `False`):
            Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is
            `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped
            out.
    """
    # Only the main process may create/clone the hub repository.
    if not self.is_world_process_zero():
        return

    # Resolve the full "<namespace>/<name>" repo id.
    if self.args.hub_model_id is not None:
        repo_name = self.args.hub_model_id
    else:
        repo_name = Path(self.args.output_dir).absolute().name
    if "/" not in repo_name:
        repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)

    # Make sure the repo exists.
    create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True)
    try:
        self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
    except EnvironmentError:
        if not (self.args.overwrite_output_dir and at_init):
            raise
        # Try again after wiping output_dir
        shutil.rmtree(self.args.output_dir)
        self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)

    self.repo.git_pull()

    # By default, ignore the checkpoint folders
    gitignore_path = os.path.join(self.args.output_dir, ".gitignore")
    if self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS and not os.path.exists(gitignore_path):
        with open(gitignore_path, "w", encoding="utf-8") as writer:
            writer.writelines(["checkpoint-*/"])

    # Add "*.sagemaker" to .gitignore if using SageMaker
    if os.environ.get("SM_TRAINING_ENV"):
        self._add_sm_patterns_to_gitignore()

    self.push_in_progress = None
Initializes a git repo in `self.args.hub_model_id`.
Args:
at_init (`bool`, *optional*, defaults to `False`):
Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is
`True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped
out.
| init_git_repo | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def prediction_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: Optional[bool] = None,
    ignore_keys: Optional[List[str]] = None,
    metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
    """
    Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

    Works both with or without labels. Legacy counterpart of `evaluation_loop`
    that accumulates via `DistributedTensorGatherer`; requires a sized
    dataloader.
    """
    args = self.args
    if not has_length(dataloader):
        raise ValueError("dataloader must implement a working __len__")
    prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
    # if eval is called w/o train init deepspeed here
    if args.deepspeed and not self.deepspeed:
        # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
        # from the checkpoint eventually
        deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
        self.model = deepspeed_engine.module
        self.model_wrapped = deepspeed_engine
        self.deepspeed = deepspeed_engine
        # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
        # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
        # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
        deepspeed_engine.optimizer.optimizer = None
        deepspeed_engine.lr_scheduler = None
    model = self._wrap_model(self.model, training=False, dataloader=dataloader)
    # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
    # while ``train`` is running, cast it to the right dtype first and then put on device
    if not self.is_in_train:
        if args.fp16_full_eval:
            model = model.to(dtype=torch.float16, device=args.device)
        elif args.bf16_full_eval:
            model = model.to(dtype=torch.bfloat16, device=args.device)
    batch_size = dataloader.batch_size
    num_examples = self.num_examples(dataloader)
    logger.info(f"***** Running {description} *****")
    logger.info(f"  Num examples = {num_examples}")
    logger.info(f"  Batch size = {batch_size}")
    # Host-side accumulators, periodically flushed into the gatherers below.
    losses_host: torch.Tensor = None
    preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
    labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
    inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None
    world_size = max(1, args.world_size)
    eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
    if not prediction_loss_only:
        # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
        # a batch size to the sampler)
        make_multiple_of = None
        if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
            make_multiple_of = dataloader.sampler.batch_size
        preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
        labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
        inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
    model.eval()
    if is_torch_tpu_available():
        dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
    if args.past_index >= 0:
        self._past = None
    self.callback_handler.eval_dataloader = dataloader
    for step, inputs in enumerate(dataloader):
        loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
        inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
        # Accumulate on host; `loss` is a scalar so repeat it to batch length.
        if loss is not None:
            losses = loss.repeat(batch_size)
            losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
        if logits is not None:
            preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
        if labels is not None:
            labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
        if inputs_decode is not None:
            inputs_host = (
                inputs_decode
                if inputs_host is None
                else nested_concat(inputs_host, inputs_decode, padding_index=-100)
            )
        self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
        # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
        if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
            eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
            if not prediction_loss_only:
                preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
                inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
            # Set back to None to begin a new accumulation
            losses_host, preds_host, labels_host, inputs_host = None, None, None, None
    if args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of the evaluation loop
        delattr(self, "_past")
    # Gather all remaining tensors and put them back on the CPU
    eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
    if not prediction_loss_only:
        preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
        labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
        inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
    eval_loss = eval_losses_gatherer.finalize()
    preds = preds_gatherer.finalize() if not prediction_loss_only else None
    label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
    inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None
    if self.compute_metrics is not None and preds is not None and label_ids is not None:
        if args.include_inputs_for_metrics:
            metrics = self.compute_metrics(
                EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids)
            )
        else:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
    else:
        metrics = {}
    # To be JSON-serializable, we need to remove numpy types or zero-d tensors
    metrics = denumpify_detensorize(metrics)
    if eval_loss is not None:
        metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
    # Prefix all keys with metric_key_prefix + '_'
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
| prediction_loop | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _gather_and_numpify(self, tensors, name):
    """
    Gather `tensors` (a tensor or nested list/tuple of tensors) from all
    processes, then convert the gathered result to numpy arrays.

    `name` tags the XLA mesh reduce on TPU; it is unused on the other
    backends. Returns `None` when `tensors` is `None`.
    """
    if tensors is None:
        return
    if is_torch_tpu_available():
        gathered = nested_xla_mesh_reduce(tensors, name)
    elif is_sagemaker_mp_enabled():
        gathered = smp_gather(tensors)
    elif self.args.local_rank != -1:
        gathered = distributed_concat(tensors)
    else:
        # Single-process run: nothing to gather.
        gathered = tensors
    return nested_numpify(gathered)
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
| _gather_and_numpify | python | OptimalScale/LMFlow | src/lmflow/pipeline/utils/raft_trainer.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/pipeline/utils/raft_trainer.py | Apache-2.0 |
def _add_sm_patterns_to_gitignore(self) -> None:
    """Add SageMaker Checkpointing patterns to .gitignore file."""
    # Make sure we only do this on the main process
    if not self.is_world_process_zero():
        return

    gitignore_path = os.path.join(self.repo.local_dir, ".gitignore")

    # Read the existing .gitignore content, if the file exists.
    if os.path.exists(gitignore_path):
        with open(gitignore_path, "r") as f:
            current_content = f.read()
    else:
        current_content = ""

    # Append each SageMaker pattern that is not already present.
    content = current_content
    for pattern in ("*.sagemaker-uploading", "*.sagemaker-uploaded"):
        if pattern in content:
            continue
        content += pattern if content.endswith("\n") else f"\n{pattern}"

    # Only rewrite the file when something actually changed.
    if content != current_content:
        with open(gitignore_path, "w") as f:
            logger.debug(f"Writing .gitignore file. Content: {content}")
            f.write(content)

    self.repo.git_add(".gitignore")

    # avoid race condition with git status
    time.sleep(0.5)

    if not self.repo.is_repo_clean():
        self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
        self.repo.git_push()
def text_to_textlist_tokenize_function(
    examples,
    data_args: DatasetArguments,
    tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
    column_names,
    add_special_tokens,
    use_truncation,
) -> Dict:
    """For rm inference, and don't need attn mask and labels.

    NOTE: input_ids here refers to the tokenized input_ids of the input **and** output:
    for every example, each candidate output is appended to the shared input
    prompt and the concatenations are tokenized together.
    """
    num_example = len(examples[column_names[0]])
    # Carry the original columns through unchanged.
    output_dict = {column_name: examples[column_name] for column_name in column_names}

    tokenized_per_example = []
    for example_idx in range(num_example):
        prompt = examples["input"][example_idx]
        # One concatenated text per candidate completion.
        candidates = [prompt + completion for completion in examples["output"][example_idx]]
        encoded = tokenizer(
            candidates,
            add_special_tokens=add_special_tokens,
            truncation=use_truncation,
        )
        tokenized_per_example.append(encoded["input_ids"])
    output_dict["input_ids"] = tokenized_per_example

    if data_args.disable_group_texts:
        # Pad/truncate each sequence individually instead of grouping.
        output_dict = blocking_text_to_textlist(
            token_dict=output_dict,
            block_size=data_args.block_size,
            model_max_length=tokenizer.model_max_length,
            pad_token_id=tokenizer.pad_token_id,
            padding_side=tokenizer.padding_side,
            truncation_side=tokenizer.truncation_side,
        )
    return output_dict
NOTE: input_ids here refers to the tokenized input_ids of the input **and** output
| text_to_textlist_tokenize_function | python | OptimalScale/LMFlow | src/lmflow/tokenization/hf_text_regression_model.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/tokenization/hf_text_regression_model.py | Apache-2.0 |
def make_shell_args_from_dataclass(
    dataclass_objects: List,
    format: str="subprocess",
    skip_default: bool=True,
    ignored_args_list: Optional[List[str]]=None,
) -> Union[str, List[str]]:
    """Return a string or a list of strings that can be used as shell arguments.

    Parameters
    ----------
    dataclass_objects : List
        A list of dataclass objects.
    format : str, optional
        Return format, can be "shell" or "subprocess", by default "subprocess".
    skip_default : bool, optional
        Whether to skip attributes with default values, by default True.

    Returns
    -------
    Union[str, List[str]]
    """
    assert isinstance(dataclass_objects, list), "dataclass_objects should be a list of dataclass objects."

    collected = {}
    for obj in dataclass_objects:
        declared_fields = obj.__dataclass_fields__
        for key, value in obj.__dict__.items():
            if ignored_args_list and key in ignored_args_list:
                continue
            # Skip attributes added dynamically (not declared on the dataclass).
            if key not in declared_fields:
                continue
            # NOTE: skips every falsy value (None, False, 0, "", []) — such
            # attributes never become shell arguments.
            if not value:
                continue
            if skip_default and declared_fields[key].default == value:
                continue
            if key in collected:
                # Duplicate key across objects: keep the first value unless they differ.
                if collected[key] != value:
                    logger.warning(f"Found different values for the same key: {key}, using value: {value} instead.")
                    collected[key] = value
                continue
            if isinstance(value, Path):
                collected[key] = str(value)
            elif isinstance(value, list):
                collected[key] = ",".join(value)
            else:
                collected[key] = value

    if format == "shell":
        return " ".join(f"--{k} {v}" for k, v in collected.items())
    if format == "subprocess":
        arg_list = []
        for k, v in collected.items():
            arg_list.extend([f"--{k}", str(v)])
        return arg_list
    raise ValueError(f"Unknown format: {format}")
Parameters
----------
dataclass_objects : List
A list of dataclass objects.
format : str, optional
Return format, can be "shell" or "subprocess", by default "subprocess".
skip_default : bool, optional
Whether to skip attributes with default values, by default True.
Returns
-------
Union[str, List[str]]
| make_shell_args_from_dataclass | python | OptimalScale/LMFlow | src/lmflow/utils/common.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/common.py | Apache-2.0 |
def create_copied_dataclass(
    original_dataclass,
    field_prefix: str,
    class_prefix: str,
    new_default: Dict=None
):
    """Create a copied dataclass with new field names and default values.

    Parameters
    ----------
    original_dataclass : dataclass
    field_prefix : str
        The prefix to add to the **field** names of the copied dataclass.
    class_prefix : str
        The prefix to add to the **class** name of the copied dataclass.
    new_default : Dict, optional
        The new default values for the copied dataclass, keyed by the
        *prefixed* field name. When None, the default values of the original
        dataclass are used.

    Returns
    -------
    dataclass
    """
    original_fields = fields(original_dataclass)
    new_default = new_default or {}
    # `kw_only` exists in the `dataclasses.Field` constructor only from
    # py3.10 on: https://docs.python.org/3/library/dataclasses.html
    # (version check hoisted out of the loop; it cannot change per field)
    supports_kw_only = get_python_version().minor >= 10
    new_fields = []
    for original_field in original_fields:
        new_name = f"{field_prefix}{original_field.name}"
        # Build the Field kwargs once instead of duplicating the whole
        # construction in each version branch (the two branches previously
        # differed only in the presence of `kw_only`).
        field_kwargs = dict(
            default=new_default.get(new_name, original_field.default),
            default_factory=original_field.default_factory,
            init=original_field.init,
            repr=original_field.repr,
            hash=original_field.hash,
            compare=original_field.compare,
            metadata=original_field.metadata,
        )
        if supports_kw_only:
            field_kwargs["kw_only"] = False
        new_fields.append((new_name, original_field.type, Field(**field_kwargs)))
    copied_dataclass = make_dataclass(f"{class_prefix}{original_dataclass.__name__}", new_fields)
    return copied_dataclass
Parameters
----------
original_dataclass : dataclass
field_prefix : str
The prefix to add to the **field** names of the copied dataclass.
class_prefix : str
The prefix to add to the **class** name of the copied dataclass.
new_default : Dict, optional
The new default values for the copied dataclass. When None, the
default values of the original dataclass are used.
Returns
-------
dataclass
| create_copied_dataclass | python | OptimalScale/LMFlow | src/lmflow/utils/common.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/common.py | Apache-2.0 |
def remove_dataclass_attr_prefix(data_instance, prefix: str) -> Dict:
    """Remove the prefix from the attribute names of a dataclass instance.

    Parameters
    ----------
    data_instance : dataclass
    prefix : str
        The prefix to remove from the attribute names of the dataclass instance.
        NOTE: the first `len(prefix)` characters are sliced off unconditionally;
        assumes every field name actually starts with `prefix`.

    Returns
    -------
    Dict
    """
    return {
        f.name[len(prefix):]: getattr(data_instance, f.name)
        for f in fields(data_instance)
    }
Parameters
----------
data_instance : dataclass
prefix : str
The prefix to remove from the attribute names of the dataclass instance.
Returns
-------
Dict
| remove_dataclass_attr_prefix | python | OptimalScale/LMFlow | src/lmflow/utils/common.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/common.py | Apache-2.0 |
def add_dataclass_attr_prefix(data_instance, prefix: str) -> Dict:
    """Add the prefix to the attribute names of a dataclass instance.

    Parameters
    ----------
    data_instance : dataclass
    prefix : str
        The prefix to add to the attribute names of the dataclass instance.

    Returns
    -------
    Dict
    """
    return {
        f"{prefix}{f.name}": getattr(data_instance, f.name)
        for f in fields(data_instance)
    }
Parameters
----------
data_instance : dataclass
prefix : str
The prefix to add to the attribute names of the dataclass instance.
Returns
-------
Dict
| add_dataclass_attr_prefix | python | OptimalScale/LMFlow | src/lmflow/utils/common.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/common.py | Apache-2.0 |
def set_random_seed(seed: int):
    """
    Set the random seed for `random`, `numpy`, `torch`, `torch.cuda`.

    Parameters
    ------------
    seed : int
        The default seed.
    """
    # Seed every RNG the training stack may draw from.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # CUDA RNGs only exist when a GPU is present.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
Set the random seed for `random`, `numpy`, `torch`, `torch.cuda`.
Parameters
------------
seed : int
The default seed.
| set_random_seed | python | OptimalScale/LMFlow | src/lmflow/utils/data_utils.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py | Apache-2.0 |
def load_data(file_name: str):
    """
    Load data with file name.

    Parameters
    ------------
    file_name : str.
        The dataset file name (a JSON file with "type" and "instances" keys).

    Returns
    ------------
    inputs : list.
        The input texts of the dataset.
    outputs : list.
        The output texts file datasets.
    len : int.
        The length of the dataset.
    """
    inputs = []
    outputs = []
    # Renamed from `type` so the builtin is not shadowed.
    data_type = ""
    with open(file_name, encoding='utf-8') as f:
        json_data = json.load(f)
    data_type = json_data["type"]
    for line in json_data["instances"]:
        inputs.append(line["input"])
        outputs.append(line["output"])

    print(f"load dataset {file_name} success.\n")
    print(f"Type : {data_type}, datasize : {len(outputs)}")

    return inputs, outputs, len(outputs)
Load data with file name.
Parameters
------------
file_name : str.
The dataset file name.
Returns
------------
inputs : list.
The input texts of the dataset.
outputs : list.
The output texts file datasets.
len : int.
The length of the dataset.
| load_data | python | OptimalScale/LMFlow | src/lmflow/utils/data_utils.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py | Apache-2.0 |
def batchlize(examples: list, batch_size: int, random_shuffle: bool):
    """
    Convert examples to a dataloader (a list of batches).

    Parameters
    ------------
    examples : list.
        Data list.
    batch_size : int.
        Maximum number of examples per batch; the last batch may be smaller.
    random_shuffle : bool
        If true, the dataloader shuffle the training data.

    Returns
    ------------
    dataloader:
        Dataloader with batch generator (a list of batches, each a list of
        at most `batch_size` examples).
    """
    if random_shuffle:
        # NOTE: shuffles the caller's list in place (original behavior kept).
        random.shuffle(examples)
    # Idiomatic stride slicing replaces the manual size-tracking while-loop.
    return [examples[i:i + batch_size] for i in range(0, len(examples), batch_size)]
Convert examples to a dataloader.
Parameters
------------
examples : list.
Data list.
batch_size : int.
random_shuffle : bool
If true, the dataloader shuffle the training data.
Returns
------------
dataloader:
Dataloader with batch generator.
| batchlize | python | OptimalScale/LMFlow | src/lmflow/utils/data_utils.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py | Apache-2.0 |
def preview_file(file_path: str, chars: int = 100):
    """
    Returns the first and last specified number of characters from a file
    without loading the entire file into memory, working with any file type.

    Args:
        file_path (str): Path to the file to be previewed
        chars (int, optional): Number of characters to show from start and end. Defaults to 100.

    Returns:
        tuple: (first_chars, last_chars) - The first and last characters from the file.
            `last_chars` is "" when the file fits within `2 * chars`.
    """
    file_size = os.path.getsize(file_path)
    with open(file_path, 'r', encoding='utf-8') as f:
        first_chars = f.read(chars)
        # Small file: the head already covers (or overlaps) the tail.
        if file_size <= 2 * chars:
            return first_chars, ""
        # Removed a redundant `f.seek(0)` that was immediately overwritten by
        # the seek below.
        # NOTE(review): `file_size` is in bytes while `chars` counts
        # characters, so for multi-byte UTF-8 content this seek may land
        # mid-character — assumes mostly-ASCII files; confirm before relying
        # on exact tails.
        f.seek(max(0, file_size - chars))
        last_chars = f.read(chars)
    return first_chars, last_chars
Returns the first and last specified number of characters from a file
without loading the entire file into memory, working with any file type.
Args:
file_path (str): Path to the file to be previewed
chars (int, optional): Number of characters to show from start and end. Defaults to 100.
Returns:
tuple: (first_chars, last_chars) - The first and last characters from the file
| preview_file | python | OptimalScale/LMFlow | src/lmflow/utils/data_utils.py | https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/utils/data_utils.py | Apache-2.0 |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from the Django repository that contain Django-related code, helping identify relevant snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.