repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/model/model_utils/rope.py
src/llamafactory/model/model_utils/rope.py
# Copyright 2025 LMSYS and the LlamaFactory team.
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# This code is inspired by the LMSYS's FastChat library.
# https://github.com/lm-sys/FastChat/blob/v0.2.30/fastchat/train/train.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import TYPE_CHECKING

from ...extras import logging
from ...extras.constants import RopeScaling


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def configure_rope(config: "PretrainedConfig", model_args: "ModelArguments") -> None:
    r"""Configure RoPE scaling on the model config according to `model_args`.

    Determines the model's pre-scaling maximum length (preferring a previously
    recorded `original_max_position_embeddings`), computes a scaling factor
    (ceil(model_max_length / old_max_length) for training, 2.0 for inference),
    then writes `rope_scaling` and an enlarged `max_position_embeddings` back
    onto `config` in place.

    Args:
        config: The pretrained model config to mutate.
        model_args: Holds `rope_scaling` (method) and `model_max_length`.
    """
    if model_args.rope_scaling is None:
        return

    if not hasattr(config, "rope_scaling"):
        logger.warning_rank0("Current model does not support RoPE scaling.")
        return

    # Prefer the original (pre-scaling) length if a previous scaling config recorded it.
    rope_scaling = getattr(config, "rope_scaling", None)
    if isinstance(rope_scaling, dict) and "original_max_position_embeddings" in rope_scaling:
        old_max_length = rope_scaling["original_max_position_embeddings"]
    elif hasattr(config, "max_position_embeddings"):
        old_max_length = getattr(config, "max_position_embeddings", None)
    else:
        logger.warning_rank0("Cannot find the max position embeddings in the config.")
        return

    if model_args.model_max_length is not None:  # training
        if model_args.model_max_length <= old_max_length:
            logger.warning_rank0("Input length is smaller than max length. Disabling rope scaling.")
            return

        if model_args.rope_scaling == RopeScaling.DYNAMIC:
            logger.warning_rank0(
                "Dynamic NTK scaling may not work well with fine-tuning. "
                "See: https://github.com/huggingface/transformers/pull/24653"
            )

        rope_factor = float(math.ceil(model_args.model_max_length / old_max_length))
    else:  # inference
        rope_factor = 2.0

    rope_kwargs = {
        "rope_type": getattr(model_args.rope_scaling, "value", model_args.rope_scaling),  # handle enum
        "factor": rope_factor,
    }
    # FIX: `old_max_length * rope_factor` is a float (rope_factor always is);
    # keep `max_position_embeddings` integral so the serialized config stays valid.
    new_max_length = int(old_max_length * rope_factor)
    setattr(config, "max_position_embeddings", new_max_length)
    logger.info_rank0(f"Enlarge max model length from {old_max_length} to {new_max_length}.")

    if model_args.rope_scaling in [RopeScaling.DYNAMIC, RopeScaling.YARN]:
        rope_kwargs["original_max_position_embeddings"] = old_max_length
    elif model_args.rope_scaling == RopeScaling.LLAMA3:
        rope_kwargs["original_max_position_embeddings"] = old_max_length
        rope_kwargs["low_freq_factor"] = 1.0
        rope_kwargs["high_freq_factor"] = 4.0

    setattr(config, "rope_scaling", rope_kwargs)
    logger.info_rank0(
        f"Using {rope_kwargs['rope_type']} scaling strategy and setting scaling factor to {rope_kwargs['factor']}."
    )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/model/model_utils/attention.py
src/llamafactory/model/model_utils/attention.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...extras import logging
from ...extras.constants import AttentionFunction
from ...extras.packages import is_torch_version_greater_than


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def configure_attn_implementation(config: "PretrainedConfig", model_args: "ModelArguments") -> None:
    """Resolve `model_args.flash_attn` to a concrete attention backend and write it onto `config`.

    Mutates both `config` and `model_args.flash_attn` in place. Applies
    model-specific overrides first (gpt-oss, gemma2), then maps the requested
    function (auto / disabled / sdpa / fa2) to the transformers implementation
    string, honoring the non-standard config attributes of internlm2 and kimi_vl.
    """
    from transformers.utils import is_flash_attn_2_available

    # gpt-oss always uses the FlashAttention-3 hub kernel and returns early.
    if getattr(config, "model_type", None) == "gpt_oss":
        from transformers.integrations.hub_kernels import load_and_register_kernel

        flash_attn3_kernel = "kernels-community/vllm-flash-attn3"
        load_and_register_kernel(flash_attn3_kernel)
        setattr(config, "_attn_implementation", flash_attn3_kernel)
        setattr(config, "_attn_implementation_internal", flash_attn3_kernel)
        model_args.flash_attn = AttentionFunction.FA3
        logger.info_rank0("Using FlashAttention-3 with attention sink for the gpt-oss model.")
        return

    # gemma2: force FA2 when available, otherwise fall back to eager attention.
    if getattr(config, "model_type", None) == "gemma2":
        if model_args.flash_attn == AttentionFunction.AUTO or model_args.flash_attn == AttentionFunction.FA2:
            if is_flash_attn_2_available():
                if model_args.flash_attn != AttentionFunction.FA2:
                    logger.warning_rank0("Gemma 2 should use flash attention 2, change `flash_attn` to fa2.")
                    model_args.flash_attn = AttentionFunction.FA2
            else:
                logger.warning_rank0("FlashAttention-2 is not installed, use eager attention.")
                model_args.flash_attn = AttentionFunction.DISABLED
        elif model_args.flash_attn == AttentionFunction.SDPA:
            logger.warning_rank0(
                "Gemma-2 should use soft-capping attention, while the SDPA attention does not support it."
            )

    if model_args.flash_attn == AttentionFunction.AUTO:
        return  # leave config untouched; transformers chooses the backend itself
    elif model_args.flash_attn == AttentionFunction.DISABLED:
        requested_attn_implementation = "eager"
    elif model_args.flash_attn == AttentionFunction.SDPA:
        if not is_torch_version_greater_than("2.1.1"):
            logger.warning_rank0("torch>=2.1.1 is required for SDPA attention.")
            return

        requested_attn_implementation = "sdpa"
    elif model_args.flash_attn == AttentionFunction.FA2:
        from transformers import is_torch_npu_available

        # NPU builds provide their own flash-attention kernel, so either suffices.
        if not (is_flash_attn_2_available() or is_torch_npu_available()):
            logger.warning_rank0("FlashAttention-2 is not installed.")
            return

        requested_attn_implementation = "flash_attention_2"
    else:
        raise NotImplementedError(f"Unknown attention type: {model_args.flash_attn}")

    if getattr(config, "model_type", None) == "internlm2":  # special case for custom models
        setattr(config, "attn_implementation", requested_attn_implementation)
    elif getattr(config, "model_type", None) == "kimi_vl":
        # kimi_vl holds separate vision/text sub-configs; both must be set.
        setattr(config.vision_config, "_attn_implementation", requested_attn_implementation)
        setattr(config.text_config, "_attn_implementation", requested_attn_implementation)
    else:
        setattr(config, "_attn_implementation", requested_attn_implementation)


def print_attn_implementation(config: "PretrainedConfig") -> None:
    """Log which attention implementation `config` ended up with."""
    if getattr(config, "model_type", None) == "internlm2":  # special case for custom models
        attn_implementation = getattr(config, "attn_implementation", None)
    else:
        attn_implementation = getattr(config, "_attn_implementation", None)

    if attn_implementation == "flash_attention_2":
        logger.info_rank0("Using FlashAttention-2 for faster training and inference.")
    elif attn_implementation == "sdpa":
        logger.info_rank0("Using torch SDPA for faster training and inference.")
    else:
        logger.info_rank0("Using vanilla attention implementation.")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/model/model_utils/valuehead.py
src/llamafactory/model/model_utils/valuehead.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Optional

import torch
from transformers.utils import cached_file

from ...extras import logging
from ...extras.constants import V_HEAD_SAFE_WEIGHTS_NAME, V_HEAD_WEIGHTS_NAME


if TYPE_CHECKING:
    from transformers import PreTrainedModel

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def load_valuehead_params(path_or_repo_id: str, model_args: "ModelArguments") -> Optional[dict[str, torch.Tensor]]:
    r"""Load value head parameters from Hugging Face Hub or local disk.

    Tries the safetensors weights file first, then falls back to the legacy
    PyTorch weights file.

    Args:
        path_or_repo_id: Hub repo id or local path that holds the value head weights.
        model_args: Supplies `cache_dir` and `hf_hub_token` for the download.

    Returns:
        Dict with keys `v_head.summary.weight` and `v_head.summary.bias`, or
        ``None`` when neither weights file can be loaded.
        (FIX: annotation now reflects the ``None`` failure path below.)
    """
    kwargs = {"path_or_repo_id": path_or_repo_id, "cache_dir": model_args.cache_dir, "token": model_args.hf_hub_token}
    err_text = ""

    try:
        from safetensors import safe_open

        vhead_file = cached_file(filename=V_HEAD_SAFE_WEIGHTS_NAME, **kwargs)
        with safe_open(vhead_file, framework="pt", device="cpu") as f:
            return {key: f.get_tensor(key) for key in f.keys()}
    except Exception as err:
        err_text = str(err)

    try:
        # Legacy .bin fallback; weights_only avoids arbitrary unpickling.
        vhead_file = cached_file(filename=V_HEAD_WEIGHTS_NAME, **kwargs)
        return torch.load(vhead_file, map_location="cpu", weights_only=True)
    except Exception as err:
        err_text = str(err)

    logger.info_rank0(f"Provided path ({path_or_repo_id}) does not contain value head weights: {err_text}.")
    logger.info_rank0("Ignore the above message if you are not resuming the training of a value head model.")
    return None


def prepare_valuehead_model(model: "PreTrainedModel") -> None:
    r"""Expose `lm_head` on architectures that store their output layer elsewhere.

    For llava / chatglm / internlm2 models, aliases the real output layer as
    `model.lm_head` and marks it ignored on save so the alias is not duplicated
    in checkpoints.
    """
    if getattr(model.config, "model_type", None) == "llava":
        setattr(model, "lm_head", model.language_model.get_output_embeddings())
        setattr(model, "_keys_to_ignore_on_save", ["lm_head.weight"])

    if getattr(model.config, "model_type", None) == "chatglm":
        setattr(model, "lm_head", model.transformer.output_layer)
        setattr(model, "_keys_to_ignore_on_save", ["lm_head.weight"])

    if getattr(model.config, "model_type", None) == "internlm2":
        setattr(model, "lm_head", model.output)
        setattr(model, "_keys_to_ignore_on_save", ["lm_head.weight"])
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/model/model_utils/embedding.py
src/llamafactory/model/model_utils/embedding.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from contextlib import nullcontext
from typing import TYPE_CHECKING, Optional

import torch
from transformers.integrations import is_deepspeed_zero3_enabled

from ...extras import logging


if TYPE_CHECKING:
    from transformers import PreTrainedModel, PreTrainedTokenizer


logger = logging.get_logger(__name__)


def _noisy_mean_initialization(embed_weight: "torch.Tensor", num_new_tokens: int) -> None:
    """Initialize new token embeddings with mean + Gaussian noise.

    This is the default initialization method used by LlamaFactory.

    Args:
        embed_weight: The embedding weight matrix to initialize (shape: [vocab_size, embedding_dim])
        num_new_tokens: Number of new tokens added at the end of the embedding matrix
    """
    embedding_dim = embed_weight.size(1)
    # Mean over the pre-existing rows only; noise std scales as 1/sqrt(dim).
    avg_weight = embed_weight[:-num_new_tokens].mean(dim=0, keepdim=True)
    noise_weight = torch.empty_like(embed_weight[-num_new_tokens:])
    noise_weight.normal_(mean=0, std=(1.0 / math.sqrt(embedding_dim)))
    embed_weight[-num_new_tokens:] = avg_weight + noise_weight


def _description_based_initialization(
    embed_weight: "torch.Tensor",
    num_new_tokens: int,
    descriptions: dict[str, str],
    tokenizer: "PreTrainedTokenizer",
    model: "PreTrainedModel",
    add_noise: bool = False,
) -> None:
    """Initialize new token embeddings based on textual descriptions.

    For each new token, this function:
    1. Tokenizes its description text
    2. Gets embeddings of the description tokens
    3. Averages them to initialize the new token's embedding
    4. Optionally adds Gaussian noise

    Args:
        embed_weight: The embedding weight matrix to initialize (shape: [vocab_size, embedding_dim])
        num_new_tokens: Number of new tokens added
        descriptions: Dict mapping token string to its description text
            e.g., {"<think>": "A token representing reasoning process"}
        tokenizer: The tokenizer instance
        model: The model instance (used to get input embeddings)
        add_noise: Whether to add Gaussian noise to the initialization

    Example:
        descriptions = {
            "<|START_OF_SVG|>": "Marks the beginning of an SVG document",
            "<|END_OF_SVG|>": "Marks the end of an SVG document"
        }
    """
    # NOTE(review): description i is written into the i-th new row, so the dict's
    # insertion order is assumed to match the order the tokens were added — verify
    # against the caller. If num_new_tokens exceeds len(descriptions) (e.g. due to
    # pad_to_multiple_of padding), the trailing rows keep resize's default init.
    embedding_dim = embed_weight.size(1)

    for i, desc in enumerate(descriptions.values()):
        # Tokenize description text
        tokens = tokenizer(desc, return_tensors="pt", add_special_tokens=False)

        # no_grad: the in-place writes below must not be traced by autograd.
        with torch.no_grad():
            token_ids = tokens["input_ids"][0]
            # Move to the same device as embed_weight
            device = embed_weight.device
            token_ids = token_ids.to(device)

            # Filter out new tokens (they don't have valid embeddings yet)
            valid_token_ids = token_ids[token_ids < (len(tokenizer) - num_new_tokens)]

            if len(valid_token_ids) == 0:
                # Fallback: use mean of all existing embeddings
                logger.warning_rank0(
                    f"Description for token {i + 1}/{num_new_tokens} contains no valid tokens. "
                    "Using mean of existing embeddings."
                )
                base_embedding = embed_weight[:-num_new_tokens].mean(dim=0)
            else:
                # Get embeddings of description tokens and average them
                token_embeds = model.get_input_embeddings()(valid_token_ids)
                base_embedding = token_embeds.mean(dim=0)

            # Add noise if requested (ensure correct device and dtype)
            if add_noise:
                noise = torch.randn_like(base_embedding) * (1.0 / math.sqrt(embedding_dim))
                embed_weight[-num_new_tokens + i] = base_embedding + noise
            else:
                embed_weight[-num_new_tokens + i] = base_embedding


def _initialize_embeddings(
    embed_weight: "torch.Tensor",
    num_new_tokens: int,
    init_method: str,
    new_special_tokens_config: Optional[dict],
    tokenizer: "PreTrainedTokenizer",
    model: "PreTrainedModel",
) -> None:
    """Single source of truth for embedding initialization.

    This function selects the appropriate initialization method and applies it.

    Args:
        embed_weight: The embedding weight matrix to initialize
        num_new_tokens: Number of new tokens added
        init_method: Initialization method ('noise_init', 'desc_init', 'desc_init_w_noise')
        new_special_tokens_config: Config dict with token descriptions (required for desc_init methods)
        tokenizer: The tokenizer instance
        model: The model instance
    """
    if init_method == "desc_init" and new_special_tokens_config:
        logger.info_rank0("Using semantic initialization (desc_init) for new special tokens")
        _description_based_initialization(
            embed_weight, num_new_tokens, new_special_tokens_config, tokenizer, model, add_noise=False
        )
    elif init_method == "desc_init_w_noise" and new_special_tokens_config:
        logger.info_rank0("Using semantic initialization with noise (desc_init_w_noise) for new special tokens")
        _description_based_initialization(
            embed_weight, num_new_tokens, new_special_tokens_config, tokenizer, model, add_noise=True
        )
    else:
        # Fall through to the default; warn when a desc_init was requested but
        # no descriptions config was supplied.
        if init_method != "noise_init":
            logger.warning_rank0(
                f"init_method='{init_method}' requires descriptions config, falling back to 'noise_init'"
            )
        logger.info_rank0("Using noisy mean initialization (noise_init) for new special tokens")
        _noisy_mean_initialization(embed_weight, num_new_tokens)


def resize_embedding_layer(
    model: "PreTrainedModel",
    tokenizer: "PreTrainedTokenizer",
    new_special_tokens_config: Optional[dict] = None,
    init_special_tokens: str = "noise_init",
) -> None:
    r"""Resize token embeddings and initialize new tokens.

    Args:
        model: The model to resize
        tokenizer: The tokenizer (used to get target vocab size)
        new_special_tokens_config: Optional dict with token descriptions for semantic initialization
        init_special_tokens: Initialization method ('noise_init', 'desc_init', 'desc_init_w_noise')
    """
    if is_deepspeed_zero3_enabled():
        import deepspeed  # type: ignore

        # Under ZeRO-3 the embedding weights are sharded; gather them on rank 0
        # so sizes can be read and rows written.
        params = [model.get_input_embeddings().weight]
        if model.get_output_embeddings() is not None and not model.config.tie_word_embeddings:
            params.append(model.get_output_embeddings().weight)

        context_maybe_zero3 = deepspeed.zero.GatheredParameters(params, modifier_rank=0)
    else:
        context_maybe_zero3 = nullcontext()

    with context_maybe_zero3:
        current_embedding_size = model.get_input_embeddings().weight.size(0)

    if len(tokenizer) > current_embedding_size:
        if getattr(model, "quantization_method", None):
            raise ValueError("Cannot resize embedding layers of a quantized model.")

        if not isinstance(model.get_output_embeddings(), torch.nn.Linear):
            raise ValueError("Current model does not support resizing embedding layers.")

        # pad_to_multiple_of=64 may grow the matrix past len(tokenizer).
        model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)

        with context_maybe_zero3:
            new_embedding_size = model.get_input_embeddings().weight.size(0)
            num_new_tokens = new_embedding_size - current_embedding_size
            logger.info_rank0(
                f"Resizing embeddings: {current_embedding_size} -> {new_embedding_size} (+{num_new_tokens} tokens)"
            )

            # Initialize input embeddings
            _initialize_embeddings(
                model.get_input_embeddings().weight.data,
                num_new_tokens,
                init_special_tokens,
                new_special_tokens_config,
                tokenizer,
                model,
            )

            # Initialize output embeddings if not tied
            if model.get_output_embeddings() is not None and not model.config.tie_word_embeddings:
                _initialize_embeddings(
                    model.get_output_embeddings().weight.data,
                    num_new_tokens,
                    init_special_tokens,
                    new_special_tokens_config,
                    tokenizer,
                    model,
                )

            model.config.vocab_size = new_embedding_size
            logger.info_rank0(f"Resized token embeddings from {current_embedding_size} to {new_embedding_size}.")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/model/model_utils/longlora.py
src/llamafactory/model/model_utils/longlora.py
# Copyright 2025 EleutherAI, HuggingFace Inc., Yukang Chen, and the LlamaFactory team.
#
# This code is based on the EleutherAI's GPT-NeoX and the HuggingFace's Transformers libraries.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py
# This code is also inspired by the original LongLoRA implementation.
# https://github.com/dvlab-research/LongLoRA/blob/main/llama_attn_replace.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import TYPE_CHECKING, Optional

import torch
import torch.nn as nn
import transformers

from ...extras import logging
from ...extras.constants import SUPPORTED_CLASS_FOR_S2ATTN
from ...extras.misc import check_version
from ...extras.packages import is_transformers_version_greater_than


if not is_transformers_version_greater_than("4.48.0"):
    from transformers.modeling_flash_attention_utils import _flash_attention_forward
    from transformers.models.llama.modeling_llama import (
        Cache,
        LlamaAttention,
        LlamaFlashAttention2,
        LlamaSdpaAttention,
        apply_rotary_pos_emb,
        repeat_kv,
    )


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


transformers_logger = transformers.utils.logging.get_logger(__name__)


# Modified from:
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py
def llama_attention_forward(
    self: "LlamaAttention",
    hidden_states: "torch.Tensor",
    attention_mask: Optional["torch.Tensor"] = None,
    position_ids: Optional["torch.LongTensor"] = None,
    past_key_value: Optional["Cache"] = None,
    output_attentions: bool = False,
    cache_position: Optional["torch.LongTensor"] = None,
    position_embeddings: Optional[tuple["torch.Tensor", "torch.Tensor"]] = None,
    **kwargs,
) -> tuple["torch.Tensor", Optional["torch.Tensor"], Optional[tuple["torch.Tensor"]]]:
    """Eager Llama attention with LongLoRA shifted sparse attention (S^2-Attn).

    When `config.group_size_ratio` is set and the module is training, queries,
    keys and values are split into groups with half of the heads rolled by half
    a group, attention runs per group, and the shift is undone afterwards.
    """
    bsz, q_len, _ = hidden_states.size()
    query_states: torch.Tensor = self.q_proj(hidden_states)
    key_states: torch.Tensor = self.k_proj(hidden_states)
    value_states: torch.Tensor = self.v_proj(hidden_states)

    query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
    value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

    if position_embeddings is None:
        cos, sin = self.rotary_emb(value_states, position_ids)
    else:
        cos, sin = position_embeddings

    query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

    if past_key_value is not None:
        cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
        key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

    key_states = repeat_kv(key_states, self.num_key_value_groups)
    value_states = repeat_kv(value_states, self.num_key_value_groups)

    if getattr(self.config, "group_size_ratio", None) and self.training:  # shift
        groupsz = int(q_len * getattr(self.config, "group_size_ratio"))
        assert q_len % groupsz == 0, f"q_len {q_len} should be divisible by group size {groupsz}."
        num_groups = q_len // groupsz

        def shift(state: "torch.Tensor") -> "torch.Tensor":
            state = state.transpose(1, 2)  # output: (bsz, seq_len, n_heads, head_dim)
            # Roll the second half of the heads backwards by half a group.
            state = torch.cat(
                (state[:, :, : self.num_heads // 2], state[:, :, self.num_heads // 2 :].roll(-groupsz // 2, dims=1)),
                dim=2,
            )
            return state.reshape(bsz * num_groups, groupsz, self.num_heads, self.head_dim).transpose(1, 2)

        query_states, key_states, value_states = shift(query_states), shift(key_states), shift(value_states)
        if attention_mask is not None:
            attention_mask = attention_mask[:, :, :groupsz, :groupsz].repeat(num_groups, 1, 1, 1)

    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

    if attention_mask is not None:  # no matter the length, we just slice it
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    # upcast attention to fp32
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
    attn_output = torch.matmul(attn_weights, value_states)  # (bsz, :, seq_len, :) or (bsz * n_group, :, groupsz, :)
    attn_output = attn_output.transpose(1, 2).contiguous()

    if getattr(self.config, "group_size_ratio", None) and self.training:  # shift back
        # FIX: `Tensor.reshape` is not in-place — the original discarded its result,
        # leaving attn_output as (bsz * num_groups, groupsz, ...), so the roll below
        # un-shifted each group locally instead of over the full sequence.
        attn_output = attn_output.reshape(bsz, q_len, self.num_heads, self.head_dim)
        attn_output = torch.cat(
            (
                attn_output[:, :, : self.num_heads // 2],
                attn_output[:, :, self.num_heads // 2 :].roll(groupsz // 2, dims=1),
            ),
            dim=2,
        )

    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
    attn_output = self.o_proj(attn_output)

    if not output_attentions:
        attn_weights = None

    return attn_output, attn_weights, past_key_value


# Modified from:
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py
def llama_flash_attention_2_forward(
    self: "LlamaFlashAttention2",
    hidden_states: "torch.Tensor",
    attention_mask: Optional["torch.Tensor"] = None,
    position_ids: Optional["torch.LongTensor"] = None,
    past_key_value: Optional["Cache"] = None,
    output_attentions: bool = False,
    cache_position: Optional["torch.LongTensor"] = None,
    position_embeddings: Optional[tuple["torch.Tensor", "torch.Tensor"]] = None,
    **kwargs,
) -> tuple["torch.Tensor", Optional["torch.Tensor"], Optional[tuple["torch.Tensor"]]]:
    """FlashAttention-2 Llama forward with LongLoRA S^2-Attn during training.

    Mirrors `llama_attention_forward` but dispatches to the flash kernel,
    which expects (bsz, seq_len, n_heads, head_dim) inputs.
    """
    # LlamaFlashAttention2 attention does not support output_attentions
    output_attentions = False

    bsz, q_len, _ = hidden_states.size()
    query_states: torch.Tensor = self.q_proj(hidden_states)
    key_states: torch.Tensor = self.k_proj(hidden_states)
    value_states: torch.Tensor = self.v_proj(hidden_states)

    query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
    value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

    if position_embeddings is None:
        cos, sin = self.rotary_emb(value_states, position_ids)
    else:
        cos, sin = position_embeddings

    query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

    if past_key_value is not None:
        cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
        key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

    key_states = repeat_kv(key_states, self.num_key_value_groups)
    value_states = repeat_kv(value_states, self.num_key_value_groups)

    # FlashAttention requires the input to have the shape (bsz, seq_len, n_heads, head_dim)
    query_states = query_states.transpose(1, 2)
    key_states = key_states.transpose(1, 2)
    value_states = value_states.transpose(1, 2)

    dropout_rate = self.attention_dropout if self.training else 0.0

    # Flash kernels do not run in fp32; recover the intended compute dtype.
    input_dtype = query_states.dtype
    if input_dtype == torch.float32:
        if torch.is_autocast_enabled():
            target_dtype = torch.get_autocast_gpu_dtype()
        elif hasattr(self.config, "_pre_quantization_dtype"):
            target_dtype = self.config._pre_quantization_dtype
        else:
            target_dtype = self.q_proj.weight.dtype

        transformers_logger.warning_once("The input hidden states seems to be silently casted in float32.")
        query_states = query_states.to(target_dtype)
        key_states = key_states.to(target_dtype)
        value_states = value_states.to(target_dtype)

    if getattr(self.config, "group_size_ratio", None) and self.training:  # shift
        groupsz = int(q_len * getattr(self.config, "group_size_ratio"))
        assert q_len % groupsz == 0, f"q_len {q_len} should be divisible by group size {groupsz}."
        num_groups = q_len // groupsz

        def shift(state: "torch.Tensor") -> "torch.Tensor":
            state = torch.cat(
                (state[:, :, : self.num_heads // 2], state[:, :, self.num_heads // 2 :].roll(-groupsz // 2, dims=1)),
                dim=2,
            )
            return state.reshape(bsz * num_groups, groupsz, self.num_heads, self.head_dim)

        query_states, key_states, value_states = shift(query_states), shift(key_states), shift(value_states)
        if attention_mask is not None:
            attention_mask = attention_mask[:, :groupsz].repeat(num_groups, 1)

    attn_output: torch.Tensor = _flash_attention_forward(
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_states.size(1),
        dropout=dropout_rate,
        sliding_window=getattr(self, "sliding_window", None),
        use_top_left_mask=self._flash_attn_uses_top_left_mask,
        is_causal=self.is_causal,
    )

    if getattr(self.config, "group_size_ratio", None) and self.training:  # shift back
        # FIX: same as the eager path — assign the reshape so the un-roll spans
        # the full sequence rather than each group independently.
        attn_output = attn_output.reshape(bsz, q_len, self.num_heads, self.head_dim)
        attn_output = torch.cat(
            (
                attn_output[:, :, : self.num_heads // 2],
                attn_output[:, :, self.num_heads // 2 :].roll(groupsz // 2, dims=1),
            ),
            dim=2,
        )

    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
    attn_output = self.o_proj(attn_output)

    if not output_attentions:  # always true: output_attentions is forced False above
        attn_weights = None

    return attn_output, attn_weights, past_key_value


# Modified from:
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py
def llama_sdpa_attention_forward( self: "LlamaSdpaAttention", hidden_states: "torch.Tensor", attention_mask: Optional["torch.Tensor"] = None, position_ids: Optional["torch.LongTensor"] = None, past_key_value: Optional["Cache"] = None, output_attentions: bool = False, cache_position: Optional["torch.LongTensor"] = None, position_embeddings: Optional[tuple["torch.Tensor", "torch.Tensor"]] = None, **kwargs, ) -> tuple["torch.Tensor", Optional["torch.Tensor"], Optional[tuple["torch.Tensor"]]]: if output_attentions: transformers_logger.warning_once( "SDPA does not support `output_attentions=True`. Falling back to the vanilla attention" ) return llama_attention_forward( self, hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) bsz, q_len, _ = hidden_states.size() query_states: torch.Tensor = self.q_proj(hidden_states) key_states: torch.Tensor = self.k_proj(hidden_states) value_states: torch.Tensor = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is None: cos, sin = self.rotary_emb(value_states, position_ids) else: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if getattr(self.config, "group_size_ratio", None) and self.training: # shift 
groupsz = int(q_len * getattr(self.config, "group_size_ratio")) assert q_len % groupsz == 0, f"q_len {q_len} should be divisible by group size {groupsz}." num_groups = q_len // groupsz def shift(state: "torch.Tensor") -> "torch.Tensor": state = state.transpose(1, 2) # output: (bsz, seq_len, n_heads, head_dim) state = torch.cat( (state[:, :, : self.num_heads // 2], state[:, :, self.num_heads // 2 :].roll(-groupsz // 2, dims=1)), dim=2, ) return state.reshape(bsz * num_groups, groupsz, self.num_heads, self.head_dim).transpose(1, 2) query_states, key_states, value_states = shift(query_states), shift(key_states), shift(value_states) if attention_mask is not None: attention_mask = attention_mask[:, :, :groupsz, :groupsz].repeat(num_groups, 1, 1, 1) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] if query_states.device.type == "cuda" and causal_mask is not None: # avoid pytorch bug query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() if getattr(self.config, "group_size_ratio", None) and self.training: # shift back attn_output.reshape(bsz, q_len, self.num_heads, self.head_dim) attn_output = torch.cat( ( attn_output[:, :, : self.num_heads // 2], attn_output[:, :, self.num_heads // 2 :].roll(groupsz // 2, dims=1), ), dim=2, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value def _apply_llama_patch() -> None: check_version("transformers>=4.45.0,<4.48.0", mandatory=True) LlamaAttention.forward = llama_attention_forward 
LlamaFlashAttention2.forward = llama_flash_attention_2_forward LlamaSdpaAttention.forward = llama_sdpa_attention_forward def configure_longlora(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None: if not is_trainable or not model_args.shift_attn: return logger = logging.get_logger(__name__) if getattr(config, "model_type", None) in SUPPORTED_CLASS_FOR_S2ATTN: setattr(config, "group_size_ratio", 0.25) _apply_llama_patch() logger.info_rank0("Using shift short attention with group_size_ratio=1/4.") else: logger.warning_rank0("Current model does not support shift short attention.")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/launcher.py
src/llamafactory/v1/launcher.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from ..extras.env import VERSION, print_env USAGE = ( "-" * 70 + "\n" + "| Usage: |\n" + "| llamafactory-cli sft -h: train models |\n" + "| llamafactory-cli version: show version info |\n" + "| Hint: You can use `lmf` as a shortcut for `llamafactory-cli`. |\n" + "-" * 70 ) WELCOME = ( "-" * 58 + "\n" + f"| Welcome to LLaMA Factory, version {VERSION}" + " " * (21 - len(VERSION)) + "|\n|" + " " * 56 + "|\n" + "| Project page: https://github.com/hiyouga/LLaMA-Factory |\n" + "-" * 58 ) def launch(): command = sys.argv.pop(1) if len(sys.argv) > 1 else "help" if command == "sft": # train command will fallback to sft command from .trainers.sft_trainer import run_sft run_sft() elif command == "env": print_env() elif command == "version": print(WELCOME) elif command == "help": print(USAGE) else: print(f"Unknown command: {command}.\n{USAGE}") if __name__ == "__main__": pass
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/__init__.py
src/llamafactory/v1/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/trainers/dpo_trainer.py
src/llamafactory/v1/trainers/dpo_trainer.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/trainers/rm_trainer.py
src/llamafactory/v1/trainers/rm_trainer.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/trainers/__init__.py
src/llamafactory/v1/trainers/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/trainers/sft_trainer.py
src/llamafactory/v1/trainers/sft_trainer.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..accelerator.interface import DistributedInterface from ..config.arg_parser import get_args from ..core.base_trainer import BaseTrainer from ..core.data_engine import DataEngine from ..core.model_loader import ModelLoader class SFTTrainer(BaseTrainer): pass def run_sft(user_args): model_args, data_args, training_args, _ = get_args(user_args) DistributedInterface(training_args.dist_config) data_engine = DataEngine(data_args) model_loader = ModelLoader(model_args) trainer = SFTTrainer( args=training_args, model=model_loader.model, processor=model_loader.processor, dataset=data_engine, ) trainer.fit()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/batching_queue.py
src/llamafactory/v1/utils/batching_queue.py
# Copyright 2025 Bytedance Ltd. and the LlamaFactory team. # # This code is inspired by the Bytedance's VeOmni library. # https://github.com/ByteDance-Seed/VeOmni/blob/v0.1.4/veomni/data/dynamic_batching.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod class DynamicBatchSizeBuffer: """A buffer to store samples for dynamic batch size.""" def __init__(self): self._buffer: list[dict[str, any]] = [] self._buffer_sample_lengths: list[int] = [] self._deleted_indices: set[int] = set() self._current_index: int = 0 self._total_token_count: int = 0 def append(self, item: dict[str, any]) -> None: """Append a sample to the buffer. Args: item: A sample to append to the buffer. The sample should be a dict with the following keys: - input_ids: torch.Tensor of shape (seq_len, ) - attention_mask: torch.Tensor of shape (seq_len, ) """ self._buffer.append(item) sample_length = int(item["attention_mask"].sum().item()) self._buffer_sample_lengths.append(sample_length) self._total_token_count += sample_length def get_samples(self, max_tokens_per_iteration: int, force: bool = True) -> list[dict[str, any]]: """Get samples from the buffer that fit within the token budget. Args: max_tokens_per_iteration: Maximum number of tokens to retrieve. force: If True, the first available sample will be returned even if it exceeds the token budget. Returns: A list of samples that fit within the token budget. 
Raises: AssertionError: If no samples are found (should not happen in normal operation). """ cum_seq_len = 0 samples = [] while self._current_index < len(self._buffer) and cum_seq_len < max_tokens_per_iteration: if self._current_index in self._deleted_indices: self._current_index += 1 continue seq_len = self._buffer_sample_lengths[self._current_index] remaining_tokens = max_tokens_per_iteration - cum_seq_len # Check if we can add this sample can_add = (force and cum_seq_len == 0) or (seq_len <= remaining_tokens) if can_add: cum_seq_len += seq_len samples.append(self._buffer[self._current_index]) self._deleted_indices.add(self._current_index) self._current_index += 1 assert len(samples) > 0, "No samples found in buffer" return samples def __len__(self) -> int: """Return the number of samples in the buffer.""" return len(self._buffer) @property def total_token_count(self) -> int: """Return the total number of tokens in the buffer.""" return self._total_token_count def flush(self) -> None: tokens_to_remove = sum(self._buffer_sample_lengths[idx] for idx in self._deleted_indices) self._total_token_count -= tokens_to_remove buffer_length = len(self._buffer) self._buffer = [self._buffer[idx] for idx in range(buffer_length) if idx not in self._deleted_indices] self._buffer_sample_lengths = [ self._buffer_sample_lengths[idx] for idx in range(buffer_length) if idx not in self._deleted_indices ] self._current_index = 0 self._deleted_indices.clear() class BaseBatchingQueue(ABC): """Base class for batching queue.""" @abstractmethod def is_full_filled(self) -> bool: raise NotImplementedError("Subclasses must implement `is_full_filled`") @abstractmethod def put_item(self, item: dict[str, any]) -> None: raise NotImplementedError("Subclasses must implement `put_item`") @abstractmethod def get_micro_batch(self, step: int) -> list[dict[str, any]]: raise NotImplementedError("Subclasses must implement `get_micro_batch`") @abstractmethod def empty(self) -> bool: raise 
NotImplementedError("Subclasses must implement `empty`") class IdentityPacker: def __init__(self, token_micro_bsz, bsz_warmup_steps, bsz_warmup_init_mbtoken): self.token_micro_bsz = token_micro_bsz self.bsz_warmup_steps = bsz_warmup_steps self.bsz_warmup_init_mbtoken = bsz_warmup_init_mbtoken def __call__(self, samples): return samples def get_token_num_to_request(self, cur_step, warmup): return ( (self.token_micro_bsz - self.bsz_warmup_init_mbtoken) * cur_step // self.bsz_warmup_steps + self.bsz_warmup_init_mbtoken if warmup else self.token_micro_bsz ) class TextBatchingQueue(BaseBatchingQueue): """Batching text queue for text data.""" def __init__( self, token_micro_bsz, buffer_size: int = 500, bsz_warmup_steps: int = -1, bsz_warmup_init_mbtoken: int = 200, ) -> None: super().__init__() self._step = 0 self.token_micro_bsz = token_micro_bsz self.bsz_warmup_steps = bsz_warmup_steps self.buffer_size = buffer_size # minimum samples in buffer self.buffer = DynamicBatchSizeBuffer() self.bsz_warmup_init_mbtoken = bsz_warmup_init_mbtoken # training warmup args assert self.bsz_warmup_init_mbtoken >= 0 self.packer = IdentityPacker( token_micro_bsz=token_micro_bsz, bsz_warmup_steps=bsz_warmup_steps, bsz_warmup_init_mbtoken=bsz_warmup_init_mbtoken, ) def is_full_filled(self) -> bool: return len(self.buffer) >= self.buffer_size and self.buffer.total_token_count >= self.token_micro_bsz def put_item(self, item: dict[str, any]): if len(item["input_ids"]) == 1: print("WARNING: EMPTY STRING.") return self.buffer.append(item) def get_token_num_to_request(self): if self.packer is not None: warmup = self._step <= self.bsz_warmup_steps and self.bsz_warmup_steps > 0 return self.packer.get_token_num_to_request(self._step, warmup=warmup) else: return self.get_cur_token_micro_bsz() def get_cur_token_micro_bsz(self): warmup = self._step <= self.bsz_warmup_steps and self.bsz_warmup_steps > 0 if warmup: return ( self.token_micro_bsz - self.bsz_warmup_init_mbtoken ) * self._step // 
self.bsz_warmup_steps + self.bsz_warmup_init_mbtoken else: return self.token_micro_bsz def get_micro_batch(self, step) -> any: """Get a micro batch from the buffer according to the current step. Args: step: the current step. Returns: data: a list of samples. """ self._step = step n_token_per_iter = self.get_token_num_to_request() cur_token_micro_bsz = self.get_cur_token_micro_bsz() assert cur_token_micro_bsz % n_token_per_iter == 0, ( "The token num to get for each request should be divisible by token micro bsz." ) n_iter = int(cur_token_micro_bsz // n_token_per_iter) data = [] for _ in range(n_iter): samples = self.buffer.get_samples(n_token_per_iter) if self.packer: samples = self.packer(samples) # maybe packed into one sample, but wrapped in list. data.extend(samples) self.buffer.flush() # remove the selected samples. return data def empty(self) -> bool: return len(self.buffer) == 0
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/constants.py
src/llamafactory/v1/utils/constants.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/plugin.py
src/llamafactory/v1/utils/plugin.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from . import logging logger = logging.get_logger(__name__) class BasePlugin: """Base class for plugins. A plugin is a callable object that can be registered and called by name. """ _registry: dict[str, Callable] = {} def __init__(self, name: str | None = None): """Initialize the plugin with a name. Args: name (str): The name of the plugin. """ self.name = name @property def register(self): """Decorator to register a function as a plugin. Example usage: ```python @PrintPlugin("hello").register() def print_hello(): print("Hello world!") ``` """ if self.name is None: raise ValueError("Plugin name is not specified.") if self.name in self._registry: logger.warning_rank0_once(f"Plugin {self.name} is already registered.") def decorator(func: Callable) -> Callable: self._registry[self.name] = func return func return decorator def __call__(self, *args, **kwargs): """Call the registered function with the given arguments. Example usage: ```python PrintPlugin("hello")() ``` """ if self.name not in self._registry: raise ValueError(f"Plugin {self.name} is not registered.") return self._registry[self.name](*args, **kwargs) if __name__ == "__main__": """ python -m llamafactory.v1.utils.plugin """ class PrintPlugin(BasePlugin): pass @PrintPlugin("hello").register def print_hello(): print("Hello world!") PrintPlugin("hello")()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/dtype.py
src/llamafactory/v1/utils/dtype.py
# Copyright 2025 Bytedance Ltd. and the LlamaFactory team. # # This code is inspired by the Bytedance's verl library. # https://github.com/volcengine/verl/blob/v0.6.1/verl/utils/torch_dtypes.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import contextmanager import torch from transformers.utils import is_torch_bf16_available_on_device, is_torch_fp16_available_on_device from ..accelerator.interface import DistributedInterface class DtypeRegistry: HALF_LIST = ["fp16", "float16", "half", torch.float16] FLOAT_LIST = ["fp32", "float32", "float", torch.float32] BFLOAT_LIST = ["bf16", "bfloat16", torch.bfloat16] class DtypeInterface: """Type of precision used.""" _is_fp16_available = is_torch_fp16_available_on_device(DistributedInterface.current_accelerator) _is_bf16_available = is_torch_bf16_available_on_device(DistributedInterface.current_accelerator) _is_fp32_available = True @staticmethod def is_available(precision: str | torch.dtype) -> bool: if precision in DtypeRegistry.HALF_LIST: return DtypeInterface._is_fp16_available elif precision in DtypeRegistry.FLOAT_LIST: return DtypeInterface._is_fp32_available elif precision in DtypeRegistry.BFLOAT_LIST: return DtypeInterface._is_bf16_available else: raise RuntimeError(f"Unexpected precision: {precision}") @staticmethod def is_fp16(precision: str | torch.dtype) -> bool: return precision in DtypeRegistry.HALF_LIST @staticmethod def is_fp32(precision: str | torch.dtype) -> bool: return precision in 
DtypeRegistry.FLOAT_LIST @staticmethod def is_bf16(precision: str | torch.dtype) -> bool: return precision in DtypeRegistry.BFLOAT_LIST @staticmethod def to_dtype(precision: str | torch.dtype) -> torch.dtype: if precision in DtypeRegistry.HALF_LIST: return torch.float16 elif precision in DtypeRegistry.FLOAT_LIST: return torch.float32 elif precision in DtypeRegistry.BFLOAT_LIST: return torch.bfloat16 else: raise RuntimeError(f"Unexpected precision: {precision}") @staticmethod def to_str(precision: torch.dtype) -> str: if precision == torch.float16: return "float16" elif precision == torch.float32: return "float32" elif precision == torch.bfloat16: return "bfloat16" else: raise RuntimeError(f"Unexpected precision: {precision}") @contextmanager def set_dtype(self, precision: str | torch.dtype): original_dtype = torch.get_default_dtype() torch.set_default_dtype(self.to_dtype(precision)) try: yield finally: torch.set_default_dtype(original_dtype)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/logging.py
src/llamafactory/v1/utils/logging.py
# Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/utils/logging.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import threading from functools import lru_cache from typing import Optional _thread_lock = threading.RLock() _default_handler: Optional["logging.Handler"] = None _default_log_level: "logging._Level" = logging.INFO class _Logger(logging.Logger): """A logger that supports rank0 logging.""" def info_rank0(self, *args, **kwargs) -> None: self.info(*args, **kwargs) def warning_rank0(self, *args, **kwargs) -> None: self.warning(*args, **kwargs) def warning_rank0_once(self, *args, **kwargs) -> None: self.warning(*args, **kwargs) def _get_default_logging_level() -> "logging._Level": """Return the default logging level.""" env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None) if env_level_str: if env_level_str.upper() in logging._nameToLevel: return logging._nameToLevel[env_level_str.upper()] else: raise ValueError(f"Unknown logging level: {env_level_str}.") return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> "_Logger": return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: """Configure root logger using a stdout stream handler with an explicit format.""" global 
_default_handler with _thread_lock: if _default_handler: # already configured return formatter = logging.Formatter( fmt="[%(levelname)s|%(asctime)s] %(name)s:%(lineno)s >> %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) _default_handler = logging.StreamHandler(sys.stdout) _default_handler.setFormatter(formatter) library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def get_logger(name: str | None = None) -> "_Logger": """Return a logger with the specified name. It it not supposed to be accessed externally.""" if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def add_handler(handler: "logging.Handler") -> None: """Add a handler to the root logger.""" _configure_library_root_logger() _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """Remove a handler to the root logger.""" _configure_library_root_logger() _get_library_root_logger().removeHandler(handler) def info_rank0(self: "logging.Logger", *args, **kwargs) -> None: if int(os.getenv("LOCAL_RANK", "0")) == 0: self.info(*args, **kwargs) def warning_rank0(self: "logging.Logger", *args, **kwargs) -> None: if int(os.getenv("LOCAL_RANK", "0")) == 0: self.warning(*args, **kwargs) @lru_cache(None) def warning_rank0_once(self: "logging.Logger", *args, **kwargs) -> None: if int(os.getenv("LOCAL_RANK", "0")) == 0: self.warning(*args, **kwargs) logging.Logger.info_rank0 = info_rank0 logging.Logger.warning_rank0 = warning_rank0 logging.Logger.warning_rank0_once = warning_rank0_once
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/__init__.py
src/llamafactory/v1/utils/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/types.py
src/llamafactory/v1/utils/types.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Literal, NotRequired, TypedDict, Union if TYPE_CHECKING: import datasets import numpy as np import torch import torch.utils.data import transformers from torch.distributed import ProcessGroup from torch.distributed.fsdp import FullyShardedDataParallel Tensor = torch.Tensor TensorLike = Union[int, float, list[int], list[float], np.ndarray, Tensor] TorchDataset = Union[torch.utils.data.Dataset, torch.utils.data.IterableDataset] HFDataset = Union[datasets.Dataset, datasets.IterableDataset] DataCollator = transformers.DataCollator DataLoader = torch.utils.data.DataLoader HFConfig = transformers.PretrainedConfig HFModel = transformers.PreTrainedModel DistModel = Union[torch.nn.parallel.DistributedDataParallel, FullyShardedDataParallel] Processor = Union[transformers.PreTrainedTokenizer, transformers.ProcessorMixin] Optimizer = torch.optim.Optimizer Scheduler = torch.optim.lr_scheduler.LRScheduler ProcessGroup = ProcessGroup else: Tensor = None TensorLike = None TorchDataset = None HFDataset = None DataCollator = None DataLoader = None HFConfig = None HFModel = None DistModel = None Processor = None Optimizer = None Scheduler = None ProcessGroup = None class DatasetInfo(TypedDict, total=False): path: str """Local file path.""" source: NotRequired[Literal["hf_hub", "ms_hub", "local"]] """Dataset source, default to "hf_hub".""" split: NotRequired[str] """Dataset split, 
default to "train".""" converter: NotRequired[str] """Dataset converter, default to None.""" size: NotRequired[int] """Number of samples, default to all samples.""" weight: NotRequired[float] """Dataset weight, default to 1.0.""" streaming: NotRequired[bool] """Is streaming dataset, default to False.""" class DistributedConfig(TypedDict, total=False): mp_replicate_size: NotRequired[int] """Model parallel replicate size, default to 1.""" mp_shard_size: NotRequired[int] """Model parallel shard size, default to world_size // mp_replicate_size.""" dp_size: NotRequired[int] """Data parallel size, default to world_size // cp_size.""" cp_size: NotRequired[int] """Context parallel size, default to 1.""" timeout: NotRequired[int] """Timeout for distributed communication, default to 600.""" class Content(TypedDict): type: Literal["text", "reasoning", "tools", "tool_calls", "image_url"] value: str class Message(TypedDict): role: Literal["system", "user", "assistant", "tool"] content: list[Content] loss_weight: float class SFTSample(TypedDict): messages: list[Message] extra_info: NotRequired[str] _dataset_name: NotRequired[str] class DPOSample(TypedDict): chosen_messages: list[Message] rejected_messages: list[Message] extra_info: NotRequired[str] _dataset_name: NotRequired[str] Sample = Union[SFTSample, DPOSample]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/pytest.py
src/llamafactory/v1/utils/pytest.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from contextlib import contextmanager @contextmanager def dist_env(local_rank: int = 0, world_size: int = 1, master_port: int = 25595): """Set distributed environment variables.""" env_vars = { "MASTER_ADDR": "127.0.0.1", "MASTER_PORT": str(master_port), "RANK": str(local_rank), "LOCAL_RANK": str(local_rank), "WORLD_SIZE": str(world_size), "LOCAL_WORLD_SIZE": str(world_size), } os.environ.update(env_vars) try: yield finally: for key in env_vars.keys(): os.environ.pop(key, None)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/env.py
src/llamafactory/v1/utils/env.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import socket def find_available_port() -> int: """Find an available port on the local machine.""" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("", 0)) port = sock.getsockname()[1] sock.close() return port def is_env_enabled(env_var: str, default: str = "0") -> bool: """Check if the environment variable is enabled.""" return os.getenv(env_var, default).lower() in ["true", "yes", "on", "t", "y", "1"]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/utils/packages.py
src/llamafactory/v1/utils/packages.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/utils/import_utils.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata import importlib.util from functools import lru_cache from typing import TYPE_CHECKING from packaging import version if TYPE_CHECKING: from packaging.version import Version def _is_package_available(name: str) -> bool: return importlib.util.find_spec(name) is not None def _get_package_version(name: str) -> "Version": try: return version.parse(importlib.metadata.version(name)) except Exception: return version.parse("0.0.0") @lru_cache def is_transformers_version_greater_than(content: str): return _get_package_version("transformers") >= version.parse(content)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/__init__.py
src/llamafactory/v1/plugins/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/trainer_plugins/__init__.py
src/llamafactory/v1/plugins/trainer_plugins/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/trainer_plugins/distributed/accelerate.py
src/llamafactory/v1/plugins/trainer_plugins/distributed/accelerate.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/trainer_plugins/distributed/__init__.py
src/llamafactory/v1/plugins/trainer_plugins/distributed/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/trainer_plugins/distributed/deepspeed.py
src/llamafactory/v1/plugins/trainer_plugins/distributed/deepspeed.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/sampler_plugins/vllm.py
src/llamafactory/v1/plugins/sampler_plugins/vllm.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/sampler_plugins/__init__.py
src/llamafactory/v1/plugins/sampler_plugins/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/data_plugins/loader.py
src/llamafactory/v1/plugins/data_plugins/loader.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
from typing import Any, Literal

from datasets import load_dataset

from ...utils.plugin import BasePlugin
from ...utils.types import DatasetInfo, HFDataset


class DataLoaderPlugin(BasePlugin):
    """Plugin for loading dataset."""

    def load(self, dataset_info: DatasetInfo) -> HFDataset:
        """Load the dataset described by ``dataset_info``.

        Args:
            dataset_info (DatasetInfo): Must contain "path"; "split" defaults to
                "train" and "streaming" defaults to False when absent.

        Returns:
            HFDataset: The loaded dataset.
        """
        path = dataset_info["path"]
        split = dataset_info.get("split", "train")
        streaming = dataset_info.get("streaming", False)
        # Dispatch to the loader implementation registered under this plugin's key.
        return super().__call__(path, split, streaming)


def _get_builder_name(path: str) -> Literal["arrow", "csv", "json", "parquet", "text"]:
    """Get dataset builder name.

    Args:
        path (str): Dataset path.

    Returns:
        Literal["arrow", "csv", "json", "parquet", "text"]: Dataset builder name.

    Raises:
        ValueError: If the file extension is not a supported dataset format.
    """
    # Extension without the leading dot, e.g. "data.jsonl" -> "jsonl".
    filetype = os.path.splitext(path)[-1][1:]
    if filetype in ["arrow", "csv", "json", "jsonl", "parquet", "txt"]:
        # Map file extensions to builder names understood by `datasets.load_dataset`.
        return filetype.replace("jsonl", "json").replace("txt", "text")
    else:
        raise ValueError(f"Unknown dataset filetype: {filetype}.")


@DataLoaderPlugin("local").register
def load_data_from_file(filepath: str, split: str, streaming: bool) -> HFDataset:
    """Load a dataset from a local file or a directory of data files.

    Raises:
        ValueError: If ``filepath`` is neither a file nor a directory.
    """
    if os.path.isdir(filepath):
        # Infer the builder from the first directory entry.
        # NOTE(review): os.listdir order is arbitrary; a mixed-extension directory
        # may mis-detect the format -- confirm inputs are homogeneous.
        filetype = _get_builder_name(os.listdir(filepath)[0])
        dataset = load_dataset(filetype, data_dir=filepath, split=split)
    elif os.path.isfile(filepath):
        filetype = _get_builder_name(filepath)
        dataset = load_dataset(filetype, data_files=filepath, split=split)
    else:
        raise ValueError(f"Can not load dataset from {filepath}.")

    if streaming:  # faster when data is streamed from local files
        dataset = dataset.to_iterable_dataset()

    return dataset


class DataIndexPlugin(BasePlugin):
    """Plugin for adjusting dataset index."""

    def adjust_data_index(
        self, data_index: list[tuple[str, int]], size: int | None, weight: float | None
    ) -> list[tuple[str, int]]:
        """Adjust dataset index by size and weight.

        Args:
            data_index (list[tuple[str, int]]): List of (dataset_name, sample_index).
            size (Optional[int]): Desired dataset size.
            weight (Optional[float]): Desired dataset weight.

        Returns:
            list[tuple[str, int]]: Adjusted dataset index.
        """
        # random.choices samples WITH replacement, so entries may repeat.
        # When both are given, `weight` is applied to the already-resized index.
        if size is not None:
            data_index = random.choices(data_index, k=size)

        if weight is not None:
            data_index = random.choices(data_index, k=int(len(data_index) * weight))

        return data_index


class DataSelectorPlugin(BasePlugin):
    """Plugin for selecting dataset samples."""

    def select(
        self, data_index: list[tuple[str, int]], index: slice | list[int] | Any
    ) -> tuple[str, int] | list[tuple[str, int]]:
        """Select dataset samples.

        Args:
            data_index (list[tuple[str, int]]): List of (dataset_name, sample_index).
            index (Union[slice, list[int], Any]): Index of dataset samples.

        Returns:
            Union[tuple[str, int], list[tuple[str, int]]]: Selected dataset samples.

        Raises:
            ValueError: If ``index`` is neither a slice nor a list of ints.
        """
        if isinstance(index, slice):
            # Expand the slice against the current length (honors negative bounds and step).
            return [data_index[i] for i in range(*index.indices(len(data_index)))]
        elif isinstance(index, list):
            return [data_index[i] for i in index]
        else:
            raise ValueError(f"Invalid index type {type(index)}.")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/data_plugins/template.py
src/llamafactory/v1/plugins/data_plugins/template.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass @dataclass class Template: user_template: str assistant_template: str system_template: str def render_message(self, message: dict[str, str]) -> str: return self.user_template.format(**message) @dataclass class QwenTemplate: message_template: str = "<|im_start|>{role}\n{content}<|im_end|>\n" # FIXME if role: tool thinking_template: str = "<think>\n{content}\n</think>\n\n" def _extract_content(self, content_data: str | list[dict[str, str]]) -> str: if isinstance(content_data, str): return content_data.strip() if isinstance(content_data, list): parts = [] for item in content_data: if item.get("type") == "text": parts.append(item.get("value", "")) elif item.get("type") == "image_url": pass return "\n".join(parts).strip() return "" def render_message(self, message: dict[str, str | list[dict[str, str]]]) -> str: role = message["role"] content = self._extract_content(message.get("content", "")) if role == "assistant": reasoning_content = message.get("reasoning_content", "") if reasoning_content: reasoning_content = self.thinking_template.format(content=str(reasoning_content).strip()) return self.message_template.format(role="assistant", content=reasoning_content + content) else: return self.message_template.format(role=role, content=content) def encode_messages(self, tokenizer, messages: list[dict[str, str]], max_seq_len: int = 8192) -> any: """Encode one message.""" 
input_ids, attention_mask, labels = [], [], [] for message in messages: content_str = self.render_message(message) content_ids = tokenizer.encode(content_str, add_special_tokens=False) input_ids += content_ids attention_mask += [1] * len(content_ids) if hasattr(message, "loss_weight"): loss_weight = message["loss_weight"] else: loss_weight = 1 if message["role"] == "assistant" else 0 if loss_weight == 1: labels += content_ids else: labels += [-100] * len(content_ids) model_inputs = {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels} model_inputs.update({"position_ids": list(range(len(input_ids)))}) model_inputs = {k: v[-max_seq_len:] for k, v in model_inputs.items()} return model_inputs if __name__ == "__main__": def to_qwen3_messages(template: QwenTemplate, messages: list[dict]): out = [] for m in messages: role = m["role"] content = template._extract_content(m.get("content", "")) if role == "assistant": reasoning = (m.get("reasoning_content") or "").strip() if reasoning: content = template.thinking_template.format(content=reasoning) + content out.append({"role": role, "content": content}) return out from transformers import AutoTokenizer tok = AutoTokenizer.from_pretrained( "Qwen/Qwen3-30B-A3B-Thinking-2507", trust_remote_code=True, ) test_messages = [ {"role": "system", "content": "You are a helpful assistant."}, { "role": "user", "content": [{"type": "text", "text": "1+1等于几?"}, {"type": "text", "text": "2+2等于几?"}], }, { "role": "assistant", "reasoning_content": "这是一个简单的数学问题。1加1的结果是2。", "content": [{"type": "text", "text": "1+1=2"}, {"type": "text", "text": "2+2=4"}], }, ] template = QwenTemplate() rendered_custom = "".join([template.render_message(m) for m in test_messages]) qwen3_messages = to_qwen3_messages(template, test_messages) rendered_hf = tok.apply_chat_template(qwen3_messages, tokenize=False, add_generation_prompt=False) print("==== custom ====") print(rendered_custom) print("==== hf ====") print(rendered_hf) assert 
rendered_custom.strip() == rendered_hf.strip(), "Rendered text mismatch" ids_custom = tok.encode(rendered_custom, add_special_tokens=False) ids_hf = tok.apply_chat_template(qwen3_messages, tokenize=True, add_generation_prompt=False) assert ids_custom == ids_hf, f"Token ids mismatch: custom={len(ids_custom)} hf={len(ids_hf)}"
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/data_plugins/converter.py
src/llamafactory/v1/plugins/data_plugins/converter.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Literal, NotRequired, TypedDict

from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import DPOSample, Sample, SFTSample


logger = logging.get_logger(__name__)


# Raw sample schema of Alpaca-style datasets.
class AlpacaSample(TypedDict, total=False):
    system: NotRequired[str]
    instruction: str
    input: NotRequired[str]
    output: str


# Functional TypedDict syntax is required because "from" is a Python keyword.
SharegptMessage = TypedDict(
    "SharegptMessage", {"from": Literal["human", "gpt", "system", "function_call", "observation"], "value": str}
)


# Raw sample schema of ShareGPT-style datasets.
class SharegptSample(TypedDict, total=False):
    conversations: list[SharegptMessage]
    tools: NotRequired[str]


# One message in OpenAI chat format.
class OpenaiMessage(TypedDict, total=False):
    role: Literal["user", "assistant", "tool"]
    content: str


# Raw sample schema of OpenAI-style datasets.
class OpenaiSample(TypedDict, total=False):
    messages: list[OpenaiMessage]


# Raw sample schema of preference-pair datasets (chosen vs. rejected conversations).
class PairSample(TypedDict, total=False):
    chosen: list[OpenaiMessage]
    rejected: list[OpenaiMessage]


class DataConverterPlugin(BasePlugin):
    """Plugin for data converters."""

    def __call__(self, raw_sample: dict[str, Any]) -> Sample:
        # Dispatch to the converter registered under this plugin's key.
        return super().__call__(raw_sample)


@DataConverterPlugin("alpaca").register
def alpaca_converter(raw_sample: AlpacaSample) -> SFTSample:
    """Convert Alpaca sample to SFT sample.

    See raw example at: https://huggingface.co/datasets/llamafactory/alpaca_gpt4_en

    Args:
        raw_sample (AlpacaSample): Alpaca sample.

    Returns:
        SFTSample: SFT sample.
    """
    messages = []
    if "system" in raw_sample:
        messages.append(
            {"role": "system", "content": [{"type": "text", "value": raw_sample["system"]}], "loss_weight": 0.0}
        )

    if "instruction" in raw_sample or "input" in raw_sample:
        # Instruction and input are concatenated into a single user turn.
        messages.append(
            {
                "role": "user",
                "content": [
                    {"type": "text", "value": raw_sample.get("instruction", "") + raw_sample.get("input", "")}
                ],
                "loss_weight": 0.0,
            }
        )

    if "output" in raw_sample:
        # Only the assistant answer contributes to the loss.
        messages.append(
            {"role": "assistant", "content": [{"type": "text", "value": raw_sample["output"]}], "loss_weight": 1.0}
        )

    return {"messages": messages}


@DataConverterPlugin("sharegpt").register
def sharegpt_converter(raw_sample: SharegptSample) -> SFTSample:
    """Convert ShareGPT sample to SFT sample.

    See raw example at: https://huggingface.co/datasets/llamafactory/glaive_toolcall_en

    Args:
        raw_sample (SharegptSample): ShareGPT sample.

    Returns:
        SFTSample: SFT sample.
    """
    # Map ShareGPT role tags to chat roles; "function_call" is handled specially below.
    tag_mapping = {
        "system": "system",
        "human": "user",
        "gpt": "assistant",
        "observation": "tool",
        "function_call": "assistant",
    }
    messages = []
    tools = raw_sample.get("tools", "")
    for message in raw_sample.get("conversations", []):
        tag = message["from"]
        if tag not in tag_mapping:
            # Unknown tag: warn and drop the message rather than failing the sample.
            logger.warning_rank0(f"Unsupported role tag {tag} in message: {message}")
        elif tag == "function_call":
            # Tool invocations become assistant turns with a "tool_calls" content part.
            messages.append(
                {
                    "role": "assistant",
                    "content": [{"type": "tool_calls", "value": message["value"]}],
                    "loss_weight": 1.0,
                }
            )
        else:
            messages.append(
                {
                    "role": tag_mapping[tag],
                    "content": [{"type": "text", "value": message["value"]}],
                    "loss_weight": 1.0 if tag == "gpt" else 0.0,
                }
            )

    if tools:
        # Attach the tool schema to the system turn, creating one if absent.
        if messages and messages[0]["role"] == "system":
            messages[0]["content"].append({"type": "tools", "value": tools})
        else:
            messages.insert(0, {"role": "system", "content": [{"type": "tools", "value": tools}], "loss_weight": 0.0})

    return {"messages": messages}


@DataConverterPlugin("pair").register
def pair_converter(raw_sample: PairSample) -> DPOSample:
    """Convert Pair sample to DPO sample.

    See raw example at: https://huggingface.co/datasets/HuggingFaceH4/orca_dpo_pairs

    Args:
        raw_sample (PairSample): pair sample with chosen, rejected fields.

    Returns:
        DPOSample: DPO sample with chosen_messages and rejected_messages.
    """

    def process_message(raw_messages: list[OpenaiMessage]):
        # Convert OpenAI-format messages; only assistant turns carry loss.
        messages = []
        for message in raw_messages:
            messages.append(
                {
                    "role": message["role"],
                    "content": [{"type": "text", "value": message["content"]}],
                    "loss_weight": 1.0 if message["role"] == "assistant" else 0.0,
                }
            )

        return messages

    chosen_messages = process_message(raw_sample.get("chosen", []))
    rejected_messages = process_message(raw_sample.get("rejected", []))
    return {"chosen_messages": chosen_messages, "rejected_messages": rejected_messages}
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/data_plugins/__init__.py
src/llamafactory/v1/plugins/data_plugins/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/quantization.py
src/llamafactory/v1/plugins/model_plugins/quantization.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/initialization.py
src/llamafactory/v1/plugins/model_plugins/initialization.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ...accelerator.helper import DeviceType from ...accelerator.interface import DistributedInterface from ...utils.plugin import BasePlugin class InitPlugin(BasePlugin): def __call__(self) -> torch.device: return super().__call__() @InitPlugin("init_on_meta").register def init_on_meta() -> torch.device: return torch.device(DeviceType.META.value) @InitPlugin("init_on_rank0").register def init_on_rank0() -> torch.device: if DistributedInterface().get_rank() == 0: return torch.device(DeviceType.CPU.value) else: return torch.device(DeviceType.META.value) @InitPlugin("init_on_default").register def init_on_default() -> torch.device: return DistributedInterface().current_accelerator
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/__init__.py
src/llamafactory/v1/plugins/model_plugins/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/add_token.py
src/llamafactory/v1/plugins/model_plugins/add_token.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/peft.py
src/llamafactory/v1/plugins/model_plugins/peft.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Literal, TypedDict

from peft import LoraConfig, PeftModel, get_peft_model

from ...utils.plugin import BasePlugin
from ...utils.types import HFModel


class LoraConfigDict(TypedDict, total=False):
    name: Literal["lora"]
    """Plugin name."""

    r: int
    """Lora rank."""

    lora_alpha: int
    """Lora alpha."""

    target_modules: list[str]
    """Target modules."""


class FreezeConfigDict(TypedDict, total=False):
    name: Literal["freeze"]
    """Plugin name."""

    freeze_trainable_layers: int
    """Freeze trainable layers."""

    freeze_trainable_modules: list[str] | None
    """Freeze trainable modules."""


class PeftPlugin(BasePlugin):
    """Plugin that wraps a model with a parameter-efficient fine-tuning method."""

    def __call__(self, model: HFModel, config: dict, is_train: bool) -> HFModel:
        """Apply the registered PEFT method to ``model``.

        Args:
            model (HFModel): The model to adapt.
            config (dict): Method-specific configuration (see TypedDicts above).
            is_train (bool): Whether the model is being prepared for training.

        Returns:
            HFModel: The adapted model.
        """
        # Fix: forward `is_train` too -- the registered implementations
        # (get_lora_model / get_freeze_model) take (model, config, is_train),
        # so forwarding only two arguments mismatched their signatures.
        return super().__call__(model, config, is_train)


@PeftPlugin("lora").register
def get_lora_model(model: HFModel, config: LoraConfigDict, is_train: bool) -> PeftModel:
    """Wrap ``model`` with LoRA adapters built from ``config``."""
    # NOTE(review): if `config` still contains the "name" plugin key,
    # LoraConfig(**config) will raise -- confirm it is stripped upstream.
    peft_config = LoraConfig(**config)
    model = get_peft_model(model, peft_config)
    return model


@PeftPlugin("freeze").register
def get_freeze_model(model: HFModel, config: FreezeConfigDict, is_train: bool) -> HFModel:
    """Freeze-tuning is not implemented yet."""
    raise NotImplementedError()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/registry.py
src/llamafactory/v1/plugins/model_plugins/kernels/registry.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The definition of kernel registry.

Init Phase:
    1. Define kernel registry.
    2. Register kernels.
"""

from typing import Optional

from ....accelerator.helper import get_current_accelerator
from .base import BaseKernel


__all__ = ["Registry", "register_kernel"]


class Registry:
    r"""Registry for managing kernel implementations.

    Storage structure: ``{ "kernel_id": Class }``
    """

    # Process-wide mapping of kernel ID -> kernel class (class attribute, shared).
    _kernels: dict[str, type[BaseKernel]] = {}

    @classmethod
    def register(cls, kernel_cls: type[BaseKernel]):
        r"""Decorator to register a kernel class.

        The class must inherit from :class:`BaseKernel` and specify ``_kernel_id``
        and ``_device`` attributes.

        Args:
            kernel_cls (type[BaseKernel]): The kernel class to register.

        Returns:
            type[BaseKernel]: The registered kernel class.

        Raises:
            TypeError: If the class does not inherit from :class:`BaseKernel`.
            ValueError: If the kernel ID is missing or already registered.
        """
        if not issubclass(kernel_cls, BaseKernel):
            raise TypeError(f"Class {kernel_cls} must inherit from BaseKernel")

        kernel_id = kernel_cls.get_kernel_id()
        device = kernel_cls.get_device()

        # The device type of the current accelerator does not match the device type
        # required by the kernel, skip registration.
        # NOTE(review): this path returns None, so when used as a decorator the class
        # name is rebound to None on non-matching devices -- confirm this is intended.
        if device != get_current_accelerator().type:
            return

        if not kernel_id:
            raise ValueError(f"Kernel ID (_kernel_id) is needed for {kernel_cls} to register")

        if kernel_id in cls._kernels:
            raise ValueError(f"{kernel_id} already registered! The registered kernel is {cls._kernels[kernel_id]}")

        cls._kernels[kernel_id] = kernel_cls
        return kernel_cls

    @classmethod
    def get(cls, kernel_id: str) -> Optional[type[BaseKernel]]:
        r"""Retrieves a registered kernel implementation by its ID.

        Args:
            kernel_id (str): The ID of the kernel to retrieve.

        Returns:
            Optional[type[BaseKernel]]: The kernel class if found, else ``None``.
        """
        return cls._kernels.get(kernel_id)

    @classmethod
    def get_registered_kernels(cls) -> dict[str, type[BaseKernel]]:
        r"""Returns a dictionary of all registered kernels.

        Returns:
            dict[str, type[BaseKernel]]: Dictionary mapping kernel IDs to kernel classes.
        """
        return cls._kernels


# export decorator alias
register_kernel = Registry.register
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/interface.py
src/llamafactory/v1/plugins/model_plugins/kernels/interface.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The definition of kernel interface.

Init Phase:
    1. Scan all kernels.
    2. Register default kernels.
    3. Define kernel plugin.
"""

import importlib
from pathlib import Path

from ....utils.logging import get_logger
from ....utils.plugin import BasePlugin
from .registry import Registry


logger = get_logger(__name__)


def scan_all_kernels():
    r"""Scan all kernels in the ``ops`` directory.

    Scans the ``ops`` directory for all ``.py`` files and attempts to import them.
    Importing triggers the :func:`~registry.register_kernel` decorator, which
    automatically registers the kernels.

    Returns:
        dict[str, type[BaseKernel]]: A dictionary of registered kernels.
        NOTE(review): when the ``ops`` directory is missing this returns ``None``,
        not an empty dict -- confirm callers tolerate that.

    .. note::
        This function assumes that the ``ops`` directory is located in the same
        directory as this file. It recursively searches for ``.py`` files and
        constructs the module path for import.
    """
    ops_path = Path(__file__).parent / "ops"
    if not ops_path.exists():
        return

    base_package = __package__
    for file_path in ops_path.rglob("*.py"):
        if file_path.name == "__init__.py":
            continue

        # calculate the relative path:
        # file_path = .../kernels_v2/ops/mlp/npu_swiglu.py
        # rel_path = ops/mlp/npu_swiglu.py
        rel_path = file_path.relative_to(Path(__file__).parent)

        # build module path: join the parts with dots and strip the ".py" suffix.
        module_name = ".".join(rel_path.parts)[:-3]
        full_module_name = f"{base_package}.{module_name}"
        try:
            importlib.import_module(full_module_name)
        except Exception as e:
            # A broken kernel module must not break the whole scan; log and continue.
            logger.warning(f"[Kernel Registry] Failed to import {full_module_name} when loading kernels: {e}")

    return Registry.get_registered_kernels()


# Populated once at import time; importing the ops modules registers the kernels.
default_kernels = scan_all_kernels()


def get_default_kernels():
    r"""Get a list of default registered kernel IDs.

    Returns:
        list[str]: List of kernel IDs.
    """
    return list(default_kernels.keys())


def apply_kernel(kernel_id: str, **kwargs):
    r"""Applies a specific kernel to the model.

    Args:
        kernel_id (str): The ID of the kernel to apply.
        **kwargs: Keyword arguments passed to the kernel application function.
            Typically includes the model instance.

    Returns:
        None: the kernel's ``apply`` is invoked for its side effects and its
        return value is discarded. NOTE(review): other docstrings in this module
        suggest a model should be returned -- confirm kernels mutate in place.

    Raises:
        ValueError: If ``kernel_id`` is not registered.
    """
    kernel = default_kernels.get(kernel_id)
    if kernel is None:
        raise ValueError(f"Kernel {kernel_id} not found")

    kernel.apply(**kwargs)


class KernelPlugin(BasePlugin):
    r"""Plugin for managing kernel optimizations."""

    pass


@KernelPlugin("auto").register
def apply_default_kernels(**kwargs):
    r"""Applies all default registered kernels to the model.

    Args:
        **kwargs: Keyword arguments passed to the kernel application function.
            Typically includes the model instance and the include_kernels
            configuration (None/False/"", True/"auto", or a comma-separated list
            of kernel IDs).

    Returns:
        HFModel: The model with applied kernels.

    Raises:
        ValueError: If a requested kernel ID is not registered.
    """
    if not kwargs.get("include_kernels"):  # None/False/empty string
        return kwargs.get("model")
    elif kwargs.get("include_kernels") == "auto" or kwargs.get("include_kernels") is True:  # True/auto
        use_kernels = default_kernels.keys()
    else:
        use_kernels = kwargs.get("include_kernels").split(",")  # "kernel_id1,kernel_id2,kernel_id3"

    for kernel in use_kernels:
        if kernel not in default_kernels:
            raise ValueError(f"Kernel {kernel} not found")

        apply_kernel(kernel, **kwargs)

    return kwargs.get("model")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/__init__.py
src/llamafactory/v1/plugins/model_plugins/kernels/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/base.py
src/llamafactory/v1/plugins/model_plugins/kernels/base.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The definition of base kernel class.

Init Phase:
    1. Define base kernel class.
    2. Define abstract methods.
"""

from abc import ABC, abstractmethod
from typing import Any

from ....accelerator.helper import DeviceType, get_current_accelerator
from ....utils.types import HFModel


class BaseKernel(ABC):
    r"""Base class for all kernel implementations.

    Subclasses must implement the abstract methods and define the required class attributes.
    """

    _kernel_id: Any = ""  # kernel ID, any hashable value to identify a kernel implementation
    _device: DeviceType = DeviceType.CPU  # "cuda", "npu", "cpu", etc.

    @classmethod
    def get_kernel_id(cls) -> str:
        r"""Returns the unique identifier for the kernel."""
        return cls._kernel_id

    @classmethod
    def get_device(cls) -> str:
        r"""Returns the device type associated with the kernel (e.g., "cuda", "npu", "cpu").

        NOTE(review): annotated ``str`` but the attribute is a ``DeviceType``;
        callers compare it against accelerator ``.type`` strings -- confirm
        ``DeviceType`` compares equal to plain strings.
        """
        return cls._device

    @classmethod
    def check_deps(cls) -> bool:
        r"""Checks if the required dependencies for the kernel are available.

        Returns:
            bool: ``True`` if dependencies are met, ``False`` otherwise.

        .. note::
            In explicit mode, if a user specifies an implementation but this check
            fails, it should raise an error instead of silently switching.
            Kernels can override this method to implement custom dependency checks.
        """
        # Default check: the kernel's target device must match the current accelerator.
        if cls._device != get_current_accelerator().type:
            return False

        return True

    @classmethod
    @abstractmethod
    def apply(cls, **kwargs) -> HFModel:
        r"""Applies the kernel optimization to the model.

        Args:
            **kwargs: Arbitrary keyword arguments, usually containing the model
                instance and the kernel configuration.

        Returns:
            HFModel: The model with the kernel applied.

        Raises:
            RuntimeError: If the kernel dependencies are not met.
            NotImplementedError: If the method is not implemented by the subclass.

        Example:
            >>> from llamafactory.v1.plugins.model_plugins.kernels.interface import apply_kernel
            >>> model = HFModel(config=config)
            >>> model = apply_kernel(model=model, kernel_id="npu_fused_moe")
        """
        # Base implementation only validates dependencies; subclasses must override.
        if not cls.check_deps():
            raise RuntimeError(f"{cls.__name__} is not available but {cls.__name__} kernel was called.")

        raise NotImplementedError
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/__init__.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The definition of NPU fused MoE kernels. Init Phase: 1. Define GMM functions. 2. Define NPU fused MoE functions. 3. Register NPU fused MoE kernel. """ import types import torch import torch.nn.functional as F try: import torch_npu except ImportError: pass from ......accelerator.helper import DeviceType from ......utils.packages import is_transformers_version_greater_than from ......utils.types import HFModel from ...base import BaseKernel from ...registry import register_kernel class GmmFunction(torch.autograd.Function): r"""Custom autograd function for NPU Grouped Matrix Multiplication (GMM).""" @staticmethod def forward(ctx, x, weight, group_list): r"""Performs the forward pass of Grouped Matrix Multiplication. Args: ctx: Context object to save tensors for backward pass. x (Tensor): Input tensor. weight (Tensor): Weight tensor. group_list (list): List of group sizes. Returns: Tensor: The result of the grouped matrix multiplication. """ ctx.save_for_backward(x, weight) ctx.group_list = group_list fwd_output = torch_npu.npu_grouped_matmul( [x], [weight], bias=None, group_list=group_list, split_item=2, group_type=0, group_list_type=1 )[0] return fwd_output @staticmethod def backward(ctx, grad_output): r"""Performs the backward pass of Grouped Matrix Multiplication. Args: ctx: Context object containing saved tensors. grad_output (Tensor): Gradient with respect to the output. 
Returns: tuple: Gradients with respect to input, weight, and None for group_list. """ input_tensor, weight = ctx.saved_tensors group_list = ctx.group_list weight = torch.transpose(weight, 1, 2) grad_input = torch_npu.npu_grouped_matmul( [grad_output], [weight], bias=None, group_list=group_list, split_item=2, group_type=0, group_list_type=1 )[0] grad_weight = torch_npu.npu_grouped_matmul( [input_tensor.T], [grad_output], bias=None, group_list=group_list, split_item=3, group_type=2, group_list_type=1, )[0] return grad_input, grad_weight, None class HybridGmmFunction(torch.autograd.Function): r"""Custom autograd function for Hybrid Grouped Matrix Multiplication on NPU.""" @staticmethod def forward(ctx, num_experts, *args): r"""Performs the forward pass of Hybrid GMM. Args: ctx: Context object to save tensors. num_experts (int): Number of experts. *args: Variable length argument list containing inputs and weights. Returns: tuple: The outputs of the grouped matrix multiplication. """ x_list = list(args[:num_experts]) weight_list = list(args[num_experts:]) split_sizes = [x.shape[0] for x in x_list] ctx.split_sizes = split_sizes ctx.num_experts = num_experts ctx.save_for_backward(*args) outputs = torch_npu.npu_grouped_matmul( x_list, weight_list, bias=None, group_list=None, split_item=0, group_type=-1 ) return tuple(outputs) @staticmethod def backward(ctx, *grad_outputs): r"""Performs the backward pass of Hybrid GMM. Args: ctx: Context object containing saved tensors. *grad_outputs: Gradients with respect to the outputs. Returns: tuple: Gradients with respect to inputs and weights. 
""" saved_tensors = ctx.saved_tensors num_experts = ctx.num_experts split_sizes = ctx.split_sizes x_list = list(saved_tensors[:num_experts]) weight_list = list(saved_tensors[num_experts:]) grad_outputs_contiguous = [g.contiguous() for g in grad_outputs] w_t_list = [w.t() for w in weight_list] grad_x_list = torch_npu.npu_grouped_matmul( grad_outputs_contiguous, # List[Tensor], 每个 [M_i, N] w_t_list, # List[Tensor], 每个 [N, K] (view) bias=None, group_list=None, split_item=0, group_type=-1, ) x_concat = torch.cat(x_list, dim=0) dy_concat = torch.cat(grad_outputs_contiguous, dim=0) # [Total_M, N] group_list = torch.tensor(split_sizes, device=x_concat.device, dtype=torch.int64) grad_w_stack = torch_npu.npu_grouped_matmul( [x_concat.t()], [dy_concat], bias=None, group_list=group_list, split_item=3, group_type=2, group_list_type=1, )[0] if grad_w_stack.dim() == 3: grad_w_list = list(torch.unbind(grad_w_stack, dim=0)) else: raise RuntimeError(f"Unexpected grad_w_stack shape: {grad_w_stack.shape}") return (None, *grad_x_list, *grad_w_list) class NpuMoeFused: r"""Container for NPU fused MoE forward functions.""" @staticmethod def npu_moe_experts_forward( self, hidden_states: torch.Tensor, routing_weights: torch.Tensor, router_indices: torch.Tensor ) -> torch.Tensor: r"""Forward pass for MoE experts using NPU fused operations. Args: self: The MoE layer instance. hidden_states (Tensor): Input hidden states. routing_weights (Tensor): Routing weights. router_indices (Tensor): Router indices. Returns: Tensor: Output tensor after expert computation. 
""" batch_size = hidden_states.shape[0] hidden_states = hidden_states.reshape(-1, self.hidden_size) permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute( hidden_states, router_indices.to(torch.int32) ) tokens_per_expert = torch.histc(router_indices, bins=self.num_experts, min=0, max=self.num_experts) intermediate_hidden_states = GmmFunction.apply(permuted_hidden_states, self.gate_up_proj, tokens_per_expert) intermediate_activations = torch_npu.npu_swiglu(intermediate_hidden_states, dim=-1) output = GmmFunction.apply(intermediate_activations, self.down_proj, tokens_per_expert) next_states = torch_npu.npu_moe_token_unpermute(output, row_ids_map, probs=routing_weights) next_states = next_states.view(batch_size, -1, self.hidden_size) return next_states @staticmethod def npu_moe_sparse_block_forward(self, hidden_states: torch.Tensor) -> torch.Tensor: r"""Forward pass for sparse MoE block using NPU optimization. Args: self: The MoE sparse block instance. hidden_states (Tensor): Input hidden states. Returns: Tensor: The routed output. """ batch_size = hidden_states.shape[0] hidden_states = hidden_states.reshape(-1, self.hidden_size) router_logits = self.gate(hidden_states) routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float) routing_weights, router_indices = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True) routing_weights = routing_weights.to(hidden_states.dtype) hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size) routed_out = self.experts(hidden_states, routing_weights, router_indices) return routed_out class Qwen3NpuMoeFused: r"""Container for Qwen3 NPU fused MoE forward functions.""" @staticmethod def qwen3moe_sparse_moe_block_forward(self, hidden_states: torch.Tensor): r"""Forward pass for Qwen3 sparse MoE block using NPU fused operations. Args: self: The Qwen3 MoE block instance. 
hidden_states (Tensor): Input hidden states. Returns: tuple: A tuple containing the next states and router logits. """ batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) router_logits = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) if self.norm_topk_prob: routing_weights /= routing_weights.sum(dim=-1, keepdim=True) routing_weights = routing_weights.to(hidden_states.dtype) permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute(hidden_states, selected_experts.int()) tokens_per_expert = torch.histc( selected_experts.float(), bins=self.num_experts, min=0, max=self.num_experts ).long() split_sizes = tokens_per_expert.tolist() input_list = list(torch.split(permuted_hidden_states, split_sizes, dim=0)) gate_weights = [e.gate_proj.weight.t() for e in self.experts] up_weights = [e.up_proj.weight.t() for e in self.experts] down_weights = [e.down_proj.weight.t() for e in self.experts] gate_out_tuple = HybridGmmFunction.apply(len(input_list), *input_list, *gate_weights) up_out_tuple = HybridGmmFunction.apply(len(input_list), *input_list, *up_weights) inter_list = [F.silu(g) * u for g, u in zip(gate_out_tuple, up_out_tuple)] down_out_tuple = HybridGmmFunction.apply(len(inter_list), *inter_list, *down_weights) grouped_output = torch.cat(down_out_tuple, dim=0) next_states = torch_npu.npu_moe_token_unpermute(grouped_output, row_ids_map, probs=routing_weights) next_states = next_states.view(batch_size, sequence_length, -1) return next_states, router_logits # moe patch config mapping kernel_moe_mapping = { "Qwen3VLMoeForConditionalGeneration": { "Qwen3VLMoeTextExperts": NpuMoeFused.npu_moe_experts_forward, "Qwen3VLMoeTextSparseMoeBlock": NpuMoeFused.npu_moe_sparse_block_forward, } } if not is_transformers_version_greater_than("5.0.0"): kernel_moe_mapping["Qwen3MoeForCausalLM"] = 
{ "Qwen3MoeSparseMoeBlock": Qwen3NpuMoeFused.qwen3moe_sparse_moe_block_forward } @register_kernel class NpuFusedMoEKernel(BaseKernel): r"""NPU Fused MoE Kernel implementation.""" _kernel_id = "npu_fused_moe" _device = DeviceType.NPU @classmethod def apply(cls, **kwargs) -> HFModel: r"""Applies the NPU fused MoE kernel to the model. Args: **kwargs: Keyword arguments containing the model. Returns: HFModel: The model with patched MoE forward functions. Raises: ValueError: If the model is not provided. RuntimeError: If dependencies are not met. """ model = kwargs.get("model", None) if model is None: raise ValueError(f"HFModel instance is required for {cls.__name__}.") if not cls.check_deps(): raise RuntimeError("torch_npu is not available but NpuMoEFusedMoEKernel was called.") archs = getattr(model.config, "architectures", []) target_moe_mapping = None for arch in archs: if arch in kernel_moe_mapping: target_moe_mapping = kernel_moe_mapping[arch] break if target_moe_mapping is None: return model for module in model.modules(): class_name = module.__class__.__name__ if class_name in target_moe_mapping: new_forward_func = target_moe_mapping[class_name] module.forward = types.MethodType(new_forward_func, module) return model
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/__init__.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_swiglu.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_swiglu.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The definition of NPU fused SwiGLU kernels. Init Phase: 1. Define SwiGLU forward functions. 2. Register NPU fused SwiGLU kernel. """ import re import types import torch from ......accelerator.helper import DeviceType from ......utils.types import HFModel from ...base import BaseKernel from ...registry import register_kernel try: import torch_npu except ImportError: pass def npu_swiglu_forward(self, hidden_state): r"""SwiGLU forward pass for NPU. Args: self: The MLP layer instance. hidden_state (Tensor): Input hidden state. Returns: Tensor: Output of SwiGLU. """ return self.down_proj( torch_npu.npu_swiglu(torch.cat((self.gate_proj(hidden_state), self.up_proj(hidden_state)), dim=-1), dim=-1) ) def _npu_swiglu_glm4_forward(self, hidden_states): r"""SwiGLU forward pass for GLM4 on NPU. Args: self: The GLM4 MLP layer instance. hidden_states (Tensor): Input hidden states. Returns: Tensor: Output of SwiGLU. """ up_states = self.gate_up_proj(hidden_states) gate, up_states = up_states.chunk(2, dim=-1) return self.down_proj(torch_npu.npu_swiglu(torch.cat((gate, up_states), dim=-1), dim=-1)) def _npu_swiglu_gemma3ntext_forward(self, hidden_states): r"""SwiGLU forward pass for Gemma3nText on NPU. Args: self: The Gemma3nText MLP layer instance. hidden_states (Tensor): Input hidden states. Returns: Tensor: Output of SwiGLU. 
""" gate_proj = self.gate_proj(hidden_states) if self.activation_sparsity > 0.0: gate_proj = self._gaussian_topk(gate_proj) down_proj = self.down_proj( torch_npu.npu_swiglu(torch.cat((gate_proj, self.up_proj(hidden_states)), dim=-1), dim=-1) ) return down_proj @register_kernel class NpuSwiGluKernel(BaseKernel): r"""NPU Kernel for fused SwiGLU activation.""" # just support apply to the following module layers expect_modules = frozenset( { "Qwen3VLMoeTextMLP", "Qwen3VLTextMLP", "Qwen3OmniMoeThinkerTextMLP", "Qwen3OmniMoeMLP", "Qwen3OmniMoeTalkerTextMLP", "Qwen3OmniMoeCode2WavMlp", "Qwen3NextMLP", "Qwen3MoeMLP", "Qwen3MLP", "Qwen2MLP", "Qwen2MoeMLP", "Qwen2_5_VLMLP", "Qwen2_5OmniMLP", "Llama4TextMLP", "LlamaMLP", "Glm4MLP", "Glm4MoeMLP", "Glm4vMoeTextMLP", "Gemma3MLP", "Gemma2MLP", "Gemma3nTextMLP", "Phi3MLP", "DeepseekV2MLP", "DeepseekV3MLP", "SeedOssMLP", } ) _kernel_id = "npu_fused_swiglu" _device = DeviceType.NPU @classmethod def apply(cls, **kwargs) -> "HFModel": r"""Applies the NPU fused SwiGLU kernel to the model. Args: **kwargs: Keyword arguments containing the model. Returns: HFModel: The model with patched SwiGLU forward functions. Raises: ValueError: If the model is not provided. RuntimeError: If dependencies are not met. 
""" model = kwargs.get("model", None) if model is None: raise ValueError(f"HFModel instance is required for {cls.__name__}.") if not cls.check_deps(): raise RuntimeError("torch_npu is not available but NpuSwiGluKernel was called.") # Mapping of specific mlp modules to their corresponding kernel implementations kernel_mapping = { "Glm4MLP": _npu_swiglu_glm4_forward, "Glm4vTextMLP": _npu_swiglu_glm4_forward, "Phi3MLP": _npu_swiglu_glm4_forward, "Gemma3nTextMLP": _npu_swiglu_gemma3ntext_forward, } swiglu_pattern = re.compile("MLP", re.IGNORECASE) for name, module in model.named_modules(): # Match any module whose class name contains "MLP" if ( re.search(swiglu_pattern, module.__class__.__name__) and module.__class__.__name__ in cls.expect_modules ): # Bind function as an instance method to preserve `self` semantics # and replace the original forward kernel_func = kernel_mapping.get(module.__class__.__name__, npu_swiglu_forward) module.forward = types.MethodType(kernel_func, module) return model
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/rms_norm/__init__.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/rms_norm/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/rms_norm/npu_rms_norm.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/rms_norm/npu_rms_norm.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The definition of NPU fused RMSNorm kernels. Init Phase: 1. Define RMSNorm forward function. 2. Register NPU fused RMSNorm kernel. """ import re import types from ......accelerator.helper import DeviceType from ......utils.types import HFModel from ...base import BaseKernel from ...registry import register_kernel def npu_rms_norm_forward(self, hidden_states): r"""NPU forward implementation for RMSNorm. Args: self: RMSNorm module instance with `weight` and `variance_epsilon`. hidden_states (Tensor): Input hidden states tensor, same shape as the baseline. Returns: Tensor: Normalized tensor consistent with the baseline RMSNorm behavior. """ import torch_npu return torch_npu.npu_rms_norm(hidden_states, self.weight, epsilon=self.variance_epsilon)[0] @register_kernel class NpuRMSNormKernel(BaseKernel): r"""NPU kernel wrapper for RMSNorm that applies the replacement within a model.""" _kernel_id = "npu_fused_rmsnorm" _device = DeviceType.NPU @classmethod def apply(cls, **kwargs) -> "HFModel": r"""Iterate the model and apply NPU-optimized forward to matched RMSNorm modules. Key points: - Match modules whose class name contains "RMSNorm" (case-insensitive). - Bind `_npu_rms_forward` as an instance method via `types.MethodType` to replace the original `forward`. - Do not modify weights, hyperparameters, or module structure to ensure numerical behavior and interface consistency. 
Args: **kwargs: Keyword arguments containing the model. Returns: HFModel: The model with NPU fused RMSNorm. Raises: RuntimeError: If torch_npu is not available. ValueError: If the model is not provided. """ model = kwargs.get("model") if model is None: raise ValueError(f"HFModel instance is required for {cls.__name__}.") if not cls.check_deps(): raise RuntimeError(f"torch_npu is not available but {cls.__name__} was called.") rms_norm_pattern = re.compile("RMSNorm", re.IGNORECASE) for name, module in model.named_modules(): # Match any module whose class name contains "RMSNorm" if re.search(rms_norm_pattern, module.__class__.__name__): # Bind function as an instance method to preserve `self` semantics # and replace the original forward module.forward = types.MethodType(npu_rms_norm_forward, module) return model
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/npu_rope.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/npu_rope.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The definition of NPU fused RoPE kernels. Init Phase: 1. Define RoPE forward functions. 2. Register NPU fused RoPE kernel. """ import sys import torch from ......accelerator.helper import DeviceType from ......utils.logging import get_logger from ......utils.types import HFModel from ...base import BaseKernel from ...registry import register_kernel logger = get_logger(__name__) try: import torch_npu except ImportError: pass def _apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): r"""Applies Rotary Position Embedding to the query and key tensors using NPU optimization. Args: q (Tensor): Query tensor. k (Tensor): Key tensor. cos (Tensor): Cosine part of embedding. sin (Tensor): Sine part of embedding. position_ids (Tensor, optional): Position IDs. Default: ``None``. unsqueeze_dim (int): Dimension to unsqueeze cos and sin. Default: 1. Returns: tuple: (q_embed, k_embed) The embedded query and key tensors. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = torch_npu.npu_rotary_mul(q, cos, sin) k_embed = torch_npu.npu_rotary_mul(k, cos, sin) return q_embed, k_embed def _apply_multimodal_rotary_pos_emb_qwen25_vl(q, k, cos, sin, mrope_section, unsqueeze_dim=1): r"""Applies Rotary Position Embedding with multimodal sections (Qwen2-VL) on NPU. Args: q (Tensor): Query tensor. k (Tensor): Key tensor. cos (Tensor): Cosine part of embedding. 
sin (Tensor): Sine part of embedding. mrope_section (Tensor): Multimodal RoPE section. unsqueeze_dim (int): Dimension to unsqueeze cos and sin. Default: 1. Returns: tuple: (q_embed, k_embed) The embedded query and key tensors. """ mrope_section = mrope_section * 2 cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) q_embed = torch_npu.npu_rotary_mul(q, cos, sin) k_embed = torch_npu.npu_rotary_mul(k, cos, sin) return q_embed, k_embed @register_kernel class NpuRoPEKernel(BaseKernel): r"""NPU Kernel for Rotary Position Embedding.""" _kernel_id = "npu_fused_rope" _device = DeviceType.NPU @classmethod def apply(cls, **kwargs) -> "HFModel": r"""Apply RoPE acceleration by monkey-patching `apply_rotary_pos_emb`. This function iterates through the model's modules to find attention layers, identifies the module where they are defined, and replaces the original `apply_rotary_pos_emb` function in that module's namespace with the NPU-accelerated version from this file. Args: **kwargs: Keyword arguments containing the model. Returns: HFModel: The model with patched RoPE functions. Raises: RuntimeError: If dependencies are not met. ValueError: If the model is not provided. 
""" if not cls.check_deps(): raise RuntimeError(f"torch_npu is not available but {cls.__name__} was called.") model = kwargs.get("model", None) if model is None: raise ValueError(f"HFModel instance is required for {cls.__name__}.") _modules = set() for module in model.modules(): if "Attention" in module.__class__.__name__: module_name = module.__class__.__module__ if module_name in _modules: continue try: target_module = sys.modules[module_name] if hasattr(target_module, "apply_rotary_pos_emb"): if getattr(target_module, "apply_rotary_pos_emb") is not _apply_rotary_pos_emb: setattr(target_module, "apply_rotary_pos_emb", _apply_rotary_pos_emb) _modules.add(module_name) if hasattr(target_module, "apply_multimodal_rotary_pos_emb"): if ( getattr(target_module, "apply_multimodal_rotary_pos_emb") is not _apply_multimodal_rotary_pos_emb_qwen25_vl ): setattr( target_module, "apply_multimodal_rotary_pos_emb", _apply_multimodal_rotary_pos_emb_qwen25_vl, ) _modules.add(module_name) except Exception as e: logger.warning_rank0_once(f"Failed to apply RoPE kernel to module {module_name}: {e}") return model
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/__init__.py
src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/samplers/cli_sampler.py
src/llamafactory/v1/samplers/cli_sampler.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..config import InputArgument, SampleBackend, get_args from ..core.base_sampler import BaseSampler from ..core.model_loader import ModelLoader def run_chat(args: InputArgument = None): data_args, model_args, _, sample_args = get_args(args) if sample_args.sample_backend != SampleBackend.HF: model_args.init_plugin = {"name": "init_on_meta"} model_loader = ModelLoader(model_args) sampler = BaseSampler(sample_args, model_args, model_loader.model, model_loader.processor) if data_args.dataset is not None: sampler.batch_infer() else: sampler.generate() if __name__ == "__main__": run_chat()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/accelerator/helper.py
src/llamafactory/v1/accelerator/helper.py
# Copyright 2025 Bytedance Ltd. and the LlamaFactory team. # # This code is inspired by the Bytedance's VeOmni library. # https://github.com/ByteDance-Seed/VeOmni/blob/v0.1.4/veomni/utils/dist_utils.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions used by the distributed interface. Including: - Environment info (rank, world_size, local_rank, etc.) - Accelerator info (device type, device count, etc.) - Collective communication operations (all_gather, all_reduce, broadcast) - Synchronize processes and ensure main-process-first execution order """ import os from collections.abc import Callable from contextlib import contextmanager from enum import Enum, unique from functools import lru_cache, wraps from typing import Optional import numpy as np import torch import torch.distributed as dist from ..utils.types import ProcessGroup, Tensor, TensorLike @unique class DeviceType(str, Enum): CPU = "cpu" CUDA = "cuda" META = "meta" MPS = "mps" NPU = "npu" XPU = "xpu" @unique class ReduceOp(str, Enum): SUM = "sum" MEAN = "mean" MAX = "max" MIN = "min" def requires_accelerator(fn): """Decorator to check if torch.accelerator is available. 
Note: this api requires torch>=2.7.0, otherwise it will raise an AttributeError or RuntimeError """ @wraps(fn) def wrapper(*args, **kwargs): if not hasattr(torch, "accelerator"): raise RuntimeError("torch.accelerator is not available, please upgrade torch to 2.7.0 or higher.") return fn(*args, **kwargs) return wrapper def is_distributed() -> bool: """Check if distributed environment is available.""" return os.getenv("RANK") is not None def get_rank() -> int: """Get rank.""" return int(os.getenv("RANK", "0")) def get_world_size() -> int: """Get world size.""" return int(os.getenv("WORLD_SIZE", "1")) def get_local_rank() -> int: """Get local rank.""" return int(os.getenv("LOCAL_RANK", "0")) def get_local_world_size() -> int: """Get local world size.""" return int(os.getenv("LOCAL_WORLD_SIZE", "1")) @lru_cache @requires_accelerator def get_current_accelerator(check_available: bool = True) -> torch.device: """Get current accelerator.""" accelerator = torch.accelerator.current_accelerator(check_available=check_available) return accelerator or torch.device(DeviceType.CPU.value) @lru_cache @requires_accelerator def get_device_count() -> int: """Get the number of available devices.""" return torch.accelerator.device_count() @requires_accelerator def synchronize() -> None: """Synchronize all processes.""" torch.accelerator.synchronize() @requires_accelerator def set_device() -> None: """Set current accelerator.""" torch.accelerator.set_device_index(get_local_rank()) def is_torch_cuda_available(): """Check if CUDA is available.""" return get_current_accelerator().type == DeviceType.CUDA def is_torch_mps_available(): """Check if MPS is available.""" return get_current_accelerator().type == DeviceType.MPS def is_torch_npu_available(): """Check if NPU is available.""" return get_current_accelerator().type == DeviceType.NPU def is_torch_xpu_available(): """Check if XPU is available.""" return get_current_accelerator().type == DeviceType.XPU def operate_tensorlike(fn: 
Callable[[...], Tensor], data: TensorLike, **kwargs) -> TensorLike: """Operate tensorlike data on current accelerator.""" device = get_current_accelerator() is_tensor = isinstance(data, torch.Tensor) is_ndarray = isinstance(data, np.ndarray) if is_tensor: orig_device = data.device data = data.to(device=device) elif is_ndarray: data = torch.from_numpy(data).to(device=device, dtype=torch.float) else: data = torch.tensor(data, dtype=torch.float, device=device) result = fn(data, **kwargs) if is_tensor: return result.to(orig_device) elif is_ndarray: return result.cpu().numpy() elif result.numel() == 1: return result.item() else: return result.tolist() def all_gather(tensor: Tensor, group: Optional[ProcessGroup] = None) -> Tensor: """Gathers the tensor from all ranks and stacks them at the first dim.""" world_size = get_world_size() output_tensor = torch.empty(world_size * tensor.numel(), dtype=tensor.dtype, device=tensor.device) dist.all_gather_into_tensor(output_tensor, tensor, group=group) return output_tensor.view(-1, *tensor.size()) def all_reduce(tensor: Tensor, op: ReduceOp = ReduceOp.MEAN, group: Optional[ProcessGroup] = None) -> Tensor: """Performs all reduce in the given process group.""" reduce_ops = { ReduceOp.MEAN: dist.ReduceOp.SUM, ReduceOp.SUM: dist.ReduceOp.SUM, ReduceOp.MAX: dist.ReduceOp.MAX, ReduceOp.MIN: dist.ReduceOp.MIN, } dist.all_reduce(tensor, op=reduce_ops[op], group=group) if op == ReduceOp.MEAN: # ReduceOp.AVG is not supported by the NPU backend tensor /= dist.get_world_size(group=group) return tensor def broadcast(tensor: Tensor, src: int = 0, group: Optional[ProcessGroup] = None) -> Tensor: """Broadcasts the tensor from the src process to all other processes.""" dist.broadcast(tensor, src=src, group=group) return tensor @contextmanager def main_process_first(local_only: bool = True) -> None: """A context manager for torch distributed environment to do something on the main process firstly.""" if get_world_size() > 1: is_main_process = 
get_local_rank() == 0 if local_only else get_rank() == 0 try: if not is_main_process: dist.barrier() yield finally: if is_main_process: dist.barrier() else: yield
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/accelerator/profiler.py
src/llamafactory/v1/accelerator/profiler.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/accelerator/interface.py
src/llamafactory/v1/accelerator/interface.py
# Copyright 2025 Bytedance Ltd. and the LlamaFactory team. # # This code is inspired by the Bytedance's VeOmni library. # https://github.com/ByteDance-Seed/VeOmni/blob/v0.1.4/veomni/distributed/parallel_state.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A unified interface for model parallelism and data parallelism. Supports model parallelism types: - mp_replicate: Replicate model across multiple devices. - mp_shard: Shard model across multiple devices. And data parallelism types: - dp: Data parallelism. - cp: Context parallelism. """ from dataclasses import dataclass from datetime import timedelta from enum import Enum from typing import Any, Optional from torch.distributed import barrier, destroy_process_group, init_process_group from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from ..utils import logging from ..utils.types import DistributedConfig, ProcessGroup, Tensor, TensorLike from . 
import helper  # completes the `from . import helper` statement begun on the previous line

logger = logging.get_logger(__name__)


class Dim(str, Enum):
    """Names of the device-mesh dimensions used by the distributed interface."""

    MP_REPLICATE = "mp_replicate"  # model replicated across devices
    MP_SHARD = "mp_shard"  # model sharded across devices
    DP = "dp"  # data parallelism
    CP = "cp"  # context parallelism


@dataclass
class DistributedStrategy:
    """Distributed strategy.

    Invariants enforced in ``__post_init__``:
    mp_replicate_size * mp_shard_size == world_size and dp_size * cp_size == world_size.
    In a non-distributed run every size collapses to 1 (or the given replicate/cp sizes).
    """

    mp_replicate_size: int = 1
    """Model parallel replicate size, default to 1."""
    mp_shard_size: int | None = None
    """Model parallel shard size, default to world_size // mp_replicate_size."""
    dp_size: int | None = None
    """Data parallel size, default to world_size // cp_size."""
    cp_size: int = 1
    """Context parallel size, default to 1."""

    def __post_init__(self) -> None:
        # Resolve the model-parallel shard size: trivial when not distributed,
        # derived from world size when unset, validated otherwise.
        if not helper.is_distributed():
            self.mp_shard_size = 1
        elif self.mp_shard_size is None:
            self.mp_shard_size = helper.get_world_size() // self.mp_replicate_size
        elif self.mp_replicate_size * self.mp_shard_size != helper.get_world_size():
            raise ValueError(
                f"mp_replicate_size * mp_shard_size must equal to world_size, "
                f"got {self.mp_replicate_size} * {self.mp_shard_size} != {helper.get_world_size()}."
            )

        # Resolve the data-parallel size with the same three-way logic.
        if not helper.is_distributed():
            self.dp_size = 1
        elif self.dp_size is None:
            self.dp_size = helper.get_world_size() // self.cp_size
        elif self.dp_size * self.cp_size != helper.get_world_size():
            raise ValueError(
                f"dp_size * cp_size must equal to world_size, "
                f"got {self.dp_size} * {self.cp_size} != {helper.get_world_size()}."
            )

    @property
    def model_mesh_shape(self) -> tuple[int, int]:
        """Model parallel mesh shape."""
        return (self.mp_replicate_size, self.mp_shard_size)

    @property
    def model_mesh_dim_names(self) -> tuple[str, str]:
        """Model parallel mesh dimension names."""
        return (Dim.MP_REPLICATE.value, Dim.MP_SHARD.value)

    @property
    def data_mesh_shape(self) -> tuple[int, int]:
        """Data parallel mesh shape."""
        return (self.dp_size, self.cp_size)

    @property
    def data_mesh_dim_names(self) -> tuple[str, str]:
        """Data parallel mesh dimension names."""
        return (Dim.DP.value, Dim.CP.value)


class DistributedInterface:
    """Process-wide singleton wrapping process-group and device-mesh state.

    NOTE(review): because of the ``_initialized`` guard, a second construction
    with a *different* config is silently ignored — confirm this is intended.
    """

    _instance: Optional["DistributedInterface"] = None  # singleton storage
    _initialized: bool = False  # guards __init__ re-entry on repeated construction

    def __new__(cls, *args: Any, **kwargs: Any) -> "DistributedInterface":
        """Singleton pattern."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)

        return cls._instance

    def __init__(self, config: DistributedConfig | None = None) -> None:
        if self._initialized:
            return  # already set up; later configs are ignored (see class note)

        # Cache process/topology facts from the helper module.
        self._is_distributed = helper.is_distributed()
        self._rank = helper.get_rank()
        self._world_size = helper.get_world_size()
        self._local_rank = helper.get_local_rank()
        self._local_world_size = helper.get_local_world_size()
        self.current_accelerator = helper.get_current_accelerator()
        self.device_count = helper.get_device_count()
        if config is None:
            self.strategy = DistributedStrategy()
            timeout = 18000  # seconds; default process-group timeout
        else:
            # config is dict-like (DistributedConfig): read sizes with defaults.
            self.strategy = DistributedStrategy(
                mp_replicate_size=config.get("mp_replicate_size", 1),
                mp_shard_size=config.get("mp_shard_size", None),
                dp_size=config.get("dp_size", None),
                cp_size=config.get("cp_size", 1),
            )
            timeout = config.get("timeout", 18000)

        if self._is_distributed:
            # Bind this process to its device, then build one mesh for model
            # parallelism and one for data parallelism.
            helper.set_device()
            init_process_group(timeout=timedelta(seconds=timeout))
            self.model_device_mesh = init_device_mesh(
                device_type=self.current_accelerator.type,
                mesh_shape=self.strategy.model_mesh_shape,
                mesh_dim_names=self.strategy.model_mesh_dim_names,
            )
            self.data_device_mesh = init_device_mesh(
                device_type=self.current_accelerator.type,
                mesh_shape=self.strategy.data_mesh_shape,
                mesh_dim_names=self.strategy.data_mesh_dim_names,
            )
        else:
            self.model_device_mesh = None
            self.data_device_mesh = None

        self._initialized = True
        logger.info_rank0(f"DistributedInterface initialized with strategy={self.strategy}.")

    def __str__(self) -> str:
        return (
            f"DistributedInterface(strategy={self.strategy}), is_distributed={self._is_distributed}, "
            f"current_accelerator={self.current_accelerator}, rank={self._rank}, world_size={self._world_size}, "
            f"model_device_mesh={self.model_device_mesh}, data_device_mesh={self.data_device_mesh}"
        )

    def get_device_mesh(self, dim: Dim | None = None) -> DeviceMesh | None:
        """Get device mesh for specified dimension.

        Returns None when running non-distributed; raises if dim is omitted.
        """
        if dim is None:
            raise ValueError("dim must be specified.")
        elif self.model_device_mesh is None:
            return None
        elif dim in self.strategy.data_mesh_dim_names:
            # Dim is a str Enum, so membership against the name tuple works.
            return self.data_device_mesh[dim.value]
        else:
            return self.model_device_mesh[dim.value]

    def get_group(self, dim: Dim | None = None) -> Optional[ProcessGroup]:
        """Get process group for specified dimension (None if non-distributed)."""
        if self.model_device_mesh is None or dim is None:
            return None
        else:
            return self.get_device_mesh(dim).get_group()

    def get_rank(self, dim: Dim | None = None) -> int:
        """Get parallel rank for specified dimension (global rank if dim is None)."""
        if self.model_device_mesh is None:
            return 0
        elif dim is None:
            return self._rank
        else:
            return self.get_device_mesh(dim).get_local_rank()

    def get_world_size(self, dim: Dim | None = None) -> int:
        """Get parallel size for specified dimension (global size if dim is None)."""
        if self.model_device_mesh is None:
            return 1
        elif dim is None:
            return self._world_size
        else:
            return self.get_device_mesh(dim).size()

    def get_local_rank(self) -> int:
        """Get parallel local rank."""
        return self._local_rank

    def get_local_world_size(self) -> int:
        """Get parallel local world size."""
        return self._local_world_size

    def all_gather(self, data: Tensor, dim: Dim | None = Dim.DP) -> Tensor:
        """Gather tensor across specified parallel group; identity when non-distributed."""
        if self.model_device_mesh is not None:
            return helper.operate_tensorlike(helper.all_gather, data, group=self.get_group(dim))
        else:
            return data

    def all_reduce(
        self, data: TensorLike, op: helper.ReduceOp = helper.ReduceOp.MEAN, dim: Dim | None = Dim.DP
    ) -> TensorLike:
        """Reduce tensor across specified parallel group; identity when non-distributed."""
        if self.model_device_mesh is not None:
            return helper.operate_tensorlike(helper.all_reduce, data, op=op, group=self.get_group(dim))
        else:
            return data

    def broadcast(self, data: TensorLike, src: int = 0, dim: Dim | None = Dim.DP) -> TensorLike:
        """Broadcast tensor across specified parallel group; identity when non-distributed."""
        if self.model_device_mesh is not None:
            return helper.operate_tensorlike(helper.broadcast, data, src=src, group=self.get_group(dim))
        else:
            return data

    def sync(self) -> None:
        """Synchronize all processes (device-level)."""
        helper.synchronize()

    def barrier(self) -> None:
        """Barrier all processes (process-group level)."""
        barrier()

    def destroy(self) -> None:
        """Destroy all processes."""
        destroy_process_group()


if __name__ == "__main__":
    # NOTE(review): this passes a DistributedStrategy where a dict-like
    # DistributedConfig is expected; config.get(...) would fail on a dataclass —
    # confirm whether this demo should pass None or a dict instead.
    print(DistributedInterface(DistributedStrategy()))
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/accelerator/__init__.py
src/llamafactory/v1/accelerator/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/base_sampler.py
src/llamafactory/v1/core/base_sampler.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod

from ..config import ModelArguments, SampleArguments, SampleBackend
from ..utils.types import HFModel, Processor, TorchDataset


class BaseEngine(ABC):
    """Abstract sampling engine contract shared by all backends."""

    @abstractmethod
    def __init__(
        self,
        args: SampleArguments,
        model_args: ModelArguments,
        model: HFModel = None,
        processor: Processor = None,
    ) -> None:
        """Initialize the engine.

        Args:
            args: Sample arguments.
            model_args: Model arguments.
            model: Model.
            processor: Processor.
        """
        ...

    @abstractmethod
    async def generate(self, messages):
        """Generate a response for the given messages."""
        ...

    @abstractmethod
    async def batch_infer(self, data: TorchDataset) -> None:
        """Run batch inference over a dataset."""
        ...


class HuggingFaceEngine(BaseEngine):
    """Sampling engine backed by a HuggingFace model.

    Generation is not implemented yet; the overrides below make the class
    concrete so it can be instantiated while keeping the BaseEngine contract.
    """

    def __init__(
        self,
        args: SampleArguments,
        model_args: ModelArguments,
        model: HFModel,
        processor: Processor,
    ) -> None:
        self.args = args
        # Fix: previously only `args` was stored and the remaining constructor
        # arguments (model_args, model, processor) were silently dropped.
        self.model_args = model_args
        self.model = model
        self.processor = processor

    async def generate(self, messages):
        # Fix: without this override HuggingFaceEngine kept the abstract method
        # and instantiation in BaseSampler raised TypeError.
        raise NotImplementedError("HuggingFaceEngine.generate is not implemented yet.")

    async def batch_infer(self, data: TorchDataset) -> None:
        raise NotImplementedError("HuggingFaceEngine.batch_infer is not implemented yet.")


class BaseSampler:
    """Dispatches sampling calls to the engine selected by ``args.sample_backend``.

    Raises:
        ValueError: If the configured sample backend is unknown.
    """

    def __init__(
        self,
        args: SampleArguments,
        model_args: ModelArguments,
        model: HFModel,
        processor: Processor,
    ) -> None:
        if args.sample_backend == SampleBackend.HF:
            self.engine = HuggingFaceEngine(args, model_args, model, processor)
        else:
            raise ValueError(f"Unknown sample backend: {args.sample_backend}")

    async def generate(self, messages):
        """Asynchronously generate a response for the given messages."""
        return await self.engine.generate(messages)

    async def batch_infer(self, data: TorchDataset) -> None:
        """Asynchronously run batch inference over a dataset."""
        return await self.engine.batch_infer(data)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/data_engine.py
src/llamafactory/v1/core/data_engine.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The definition of data engine.

Init Data engine:
1. Parse dataset info from arguments.
2. Load datasets according to dataset info.
3. Build data index (and reweight samples if necessary).

Get Data Sample:
1. Get sample from data index.
2. Convert sample to standard format.
3. Return sample.
"""

import os
from collections.abc import Iterable
from typing import Any

from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf
from torch.utils.data import Dataset

from ..config.data_args import DataArguments
from ..utils.types import DatasetInfo, HFDataset, Sample


class DataEngine(Dataset):
    """Data engine.

    Aggregates one or more datasets behind a single flat index so samples can
    be addressed uniformly regardless of which underlying dataset they live in.

    Args:
        data_args: Data arguments.
    """

    def __init__(self, data_args: DataArguments) -> None:
        self.args = data_args
        """Data arguments."""
        self.datasets: dict[str, HFDataset] = {}
        """Dict of (dataset_name, dataset)"""
        self.dataset_infos: dict[str, DatasetInfo] = {}
        """Dict of (dataset_name, dataset_info)"""
        self.data_index: list[tuple[str, int]] = []
        """List of (dataset_name, sample_index)"""
        self.streaming: bool = False
        """Whether dataset is streaming."""
        # Three-phase init: resolve infos, load datasets, then build the index.
        self._get_dataset_info()
        self._load_dataset()
        self._build_data_index()

    def _get_dataset_info(self) -> None:
        """Get dataset info from data arguments.

        The `dataset` argument is resolved in priority order: local YAML file,
        hub-hosted YAML file, local data file(s), then a hub dataset name.
        """
        if self.args.dataset.endswith(".yaml") and os.path.isfile(self.args.dataset):  # local file
            self.dataset_infos = OmegaConf.load(self.args.dataset)
        elif self.args.dataset.endswith(".yaml"):  # hf hub uri, e.g. llamafactory/v1-sft-demo/dataset_info.yaml
            # Split "repo/dataset_info.yaml" into repo id and filename, then fetch.
            repo_id, filename = os.path.split(self.args.dataset)
            filepath = hf_hub_download(repo_id=repo_id, filename=filename, repo_type="dataset")
            self.dataset_infos = OmegaConf.load(filepath)
        elif os.path.exists(self.args.dataset):  # local file(s)
            self.dataset_infos = {"default": {"path": self.args.dataset, "source": "local"}}
        else:  # hf hub dataset, e.g. llamafactory/v1-sft-demo
            self.dataset_infos = {"default": {"path": self.args.dataset}}

    def _load_dataset(self) -> None:
        """Load datasets according to dataset info.

        Any single streaming dataset marks the whole engine as streaming.
        """
        for dataset_name, dataset_info in self.dataset_infos.items():
            split = dataset_info.get("split", "train")
            streaming = dataset_info.get("streaming", False)
            self.streaming |= streaming
            if dataset_info.get("source", "hf_hub") == "hf_hub":
                from datasets import load_dataset

                self.datasets[dataset_name] = load_dataset(dataset_info["path"], split=split, streaming=streaming)
            else:  # data loader plugin
                from ..plugins.data_plugins.loader import DataLoaderPlugin

                self.datasets[dataset_name] = DataLoaderPlugin(dataset_info["source"]).load(dataset_info)

    def _build_data_index(self) -> None:
        """Build dataset index.

        Streaming datasets get 1000 placeholder entries with sample index -1
        (their true length is unknown); map-style datasets are enumerated.
        """
        for dataset_name, dataset in self.datasets.items():
            streaming = self.dataset_infos[dataset_name].get("streaming", False)
            if streaming:
                data_index = [(dataset_name, -1) for _ in range(1000)]
            else:
                data_index = [(dataset_name, sample_index) for sample_index in range(len(dataset))]

            size = self.dataset_infos[dataset_name].get("size")
            weight = self.dataset_infos[dataset_name].get("weight")
            if size or weight:  # data index plugin: resize / reweight the index
                from ..plugins.data_plugins.loader import DataIndexPlugin

                data_index = DataIndexPlugin().adjust_data_index(data_index, size, weight)

            self.data_index.extend(data_index)

    def _convert_data_sample(self, raw_sample: dict[str, Any], dataset_name: str) -> Sample:
        """Convert dataset sample.

        Applies the dataset's configured converter plugin (if any) and tags the
        sample with its originating dataset name under "_dataset_name".

        Args:
            raw_sample (dict[str, Any]): Raw dataset sample.
            dataset_name (str): Dataset name.

        Returns:
            Sample: Dataset sample.
        """
        converter = self.dataset_infos[dataset_name].get("converter")
        if converter is not None:
            from ..plugins.data_plugins.converter import DataConverterPlugin

            return {"_dataset_name": dataset_name, **DataConverterPlugin(converter)(raw_sample)}
        else:
            return {"_dataset_name": dataset_name, **raw_sample}

    def __len__(self) -> int:
        """Get dataset length.

        Returns:
            int: Dataset length; -1 signals an unknown (streaming) length.
        """
        if self.streaming:
            return -1
        else:
            return len(self.data_index)

    def __getitem__(self, index: int | Any) -> Sample | list[Sample]:
        """Get dataset item.

        Non-integer indices are delegated to the data selector plugin, which
        may resolve to one entry or a list of entries.

        Args:
            index (int): Dataset index.

        Returns:
            Sample: Dataset item.

        Raises:
            ValueError: If the engine is in streaming mode.
        """
        if self.streaming:
            raise ValueError("Streaming dataset does not support index access.")

        if isinstance(index, int):
            dataset_name, sample_index = self.data_index[index]
            return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
        else:  # data selector plugin
            from ..plugins.data_plugins.loader import DataSelectorPlugin

            selected_index = DataSelectorPlugin().select(self.data_index, index)
            if isinstance(selected_index, list):
                return [
                    self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
                    for dataset_name, sample_index in selected_index
                ]
            else:
                dataset_name, sample_index = selected_index
                return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)

    def __iter__(self) -> Iterable[Sample]:
        """Get dataset iterator.

        Returns:
            Iterable[Sample]: Dataset iterator.
        """
        # NOTE: hf iterable dataset uses worker ids while map dataset does not
        # NOTE: add worker id and shuffle to the map dataset
        # https://github.com/huggingface/datasets/blob/4.0.0/src/datasets/iterable_dataset.py#L2214
        raise NotImplementedError()


if __name__ == "__main__":
    """
    python -m llamafactory.v1.core.data_engine --model none --dataset data/v1_sft_demo.yaml
    python -m llamafactory.v1.core.data_engine --model none --dataset data/v1_dpo_demo.yaml
    """
    from ..config.arg_parser import get_args

    data_args, *_ = get_args()
    data_engine = DataEngine(data_args=data_args)
    print(data_engine[0])
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/model_loader.py
src/llamafactory/v1/core/model_loader.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The definition of model loader.

How to use:
    model_loader = ModelLoader(model_args, is_trainable=True)
    model_loader.processor: Get the tokenizer or multi-modal processor.
    model_loader.model_config: Get the model configuration.
    model_loader.model: Get the HF model.

Init Workflow:
1. Init processor.
2. Init model config.
3. Init model.
4. Init adapter.
"""

import torch
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoProcessor

from ..accelerator.helper import DeviceType
from ..accelerator.interface import DistributedInterface
from ..config.model_args import ModelArguments, ModelClass
from ..utils import logging
from ..utils.types import HFConfig, HFModel, Processor


logger = logging.get_logger(__name__)


class ModelLoader:
    """Model loader.

    Args:
        model_args: Model arguments.
        is_train: Whether to train the model.
    """

    def __init__(self, model_args: ModelArguments, is_train: bool = False) -> None:
        self.args = model_args
        """Model arguments."""
        self.is_train = is_train
        """Whether to train the model."""
        # Order matters: the config initialized here is reused by _init_model.
        self.processor = self._init_processor()
        """Tokenizer or multi-modal processor."""
        self.model_config = self._init_model_config()
        """Model configuration."""
        self.model = self._init_model()
        """HF model."""

    def _init_processor(self) -> Processor:
        """Init processor.

        NOTE: Transformers v5 always use fast tokenizer.
        https://github.com/huggingface/transformers/blob/v5.0.0rc1/src/transformers/models/auto/tokenization_auto.py#L642
        """
        return AutoProcessor.from_pretrained(
            self.args.model,
            trust_remote_code=self.args.trust_remote_code,
        )

    def _init_model_config(self) -> HFConfig:
        """Init model config."""
        return AutoConfig.from_pretrained(
            self.args.model,
            trust_remote_code=self.args.trust_remote_code,
        )

    def _init_model(self) -> HFModel:
        """Init model.

        Let transformers handle the model init context.
        https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/modeling_utils.py#L3538
        """
        # Pick the Auto class by model class; multimodal configs take priority
        # over plain causal LM for the LLM case.
        if self.args.model_class == ModelClass.LLM:
            from transformers import AutoModelForCausalLM, AutoModelForImageTextToText

            if type(self.model_config) in AutoModelForImageTextToText._model_mapping.keys():
                AutoClass = AutoModelForImageTextToText
            else:
                AutoClass = AutoModelForCausalLM
        elif self.args.model_class == ModelClass.CLS:
            from transformers import AutoModelForTokenClassification

            AutoClass = AutoModelForTokenClassification
        else:
            from transformers import AutoModel

            AutoClass = AutoModel

        # The init plugin may request a special device (e.g. meta for deferred
        # materialization); otherwise use the current accelerator.
        if self.args.init_config is not None:
            from ..plugins.model_plugins.initialization import InitPlugin

            init_device = InitPlugin(self.args.init_config.name)()
        else:
            init_device = DistributedInterface().current_accelerator

        if init_device.type == DeviceType.META:
            # Meta device: build the architecture without allocating weights.
            with init_empty_weights():
                model = AutoClass.from_config(self.model_config)
        else:
            model = AutoClass.from_pretrained(
                self.args.model,
                config=self.model_config,
                dtype="auto",
                device_map=init_device,
                trust_remote_code=self.args.trust_remote_code,
            )

        if self.args.peft_config is None:
            if self.is_train:
                logger.info_rank0("Fine-tuning mode: full tuning")
                # Full tuning keeps master weights in fp32.
                model = model.to(torch.float32)
            else:
                logger.info_rank0("Inference the original model")
        else:
            from ..plugins.model_plugins.peft import PeftPlugin

            model = PeftPlugin(self.args.peft_config.name)(model, self.args.peft_config, self.is_train)

        if self.args.kernel_config is not None:
            from ..plugins.model_plugins.kernels.interface import KernelPlugin

            model = KernelPlugin(self.args.kernel_config.name)(
                model=model, include_kernels=self.args.kernel_config.get("include_kernels")
            )

        return model


if __name__ == "__main__":
    """
    python -m llamafactory.v1.core.model_loader --model llamafactory/tiny-random-qwen2.5
    """
    from ..config.arg_parser import get_args

    _, model_args, *_ = get_args()
    model_loader = ModelLoader(model_args=model_args)
    print(model_loader.processor)
    print(model_loader.model_config)
    print(model_loader.model)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/__init__.py
src/llamafactory/v1/core/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/base_trainer.py
src/llamafactory/v1/core/base_trainer.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The definition of trainer.

Init Phase:
1. Init dataloader.
2. Init optimizer (deepspeed).
3. Shard model.
4. Init optimizer (fsdp).
5. Init scheduler.

Train Phase:
1. Train Loop
"""

from ..config.training_args import TrainingArguments
from ..utils.types import HFModel, Processor, TorchDataset
from .trainer_utils.data_collator import DataCollator


class BaseTrainer:
    """Skeleton trainer holding the training state shared by all phases.

    Args:
        args: Training arguments.
        model: HF model to train.
        processor: Tokenizer or multi-modal processor.
        dataset: Training dataset.
    """

    def __init__(
        self,
        args: TrainingArguments,
        model: HFModel,
        processor: Processor,
        dataset: TorchDataset,
    ) -> None:
        # Externally supplied training state.
        self.args = args
        self.model = model
        self.processor = processor
        self.dataset = dataset
        # Internally owned components; optimizer/scheduler are created later
        # by init_model_and_optimizer.
        self.data_collator = DataCollator()
        self.optimizer = None
        self.lr_scheduler = None

    def init_model_and_optimizer(self) -> None:
        """Shard the model and build the optimizer. Not implemented yet."""

    def create_dataloader(self) -> None:
        """Build the training dataloader. Not implemented yet."""

    def fit(self) -> None:
        """Run the training loop. Not implemented yet."""
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/trainer_utils/data_loader.py
src/llamafactory/v1/core/trainer_utils/data_loader.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import sys
from collections.abc import Generator, Iterator
from dataclasses import dataclass
from typing import Optional

from torchdata.stateful_dataloader import StatefulDataLoader
from torchdata.stateful_dataloader.sampler import StatefulDistributedSampler

from ...utils.batching_queue import BaseBatchingQueue
from ...utils.logging import get_logger
from ...utils.types import Processor, TorchDataset
from .data_collator import DataCollator


logger = get_logger(__name__)


# base dataloader
class DistributedDataloader(StatefulDataLoader):
    """Base Distributed DataLoader."""

    dataset: "TorchDataset"
    sampler: "StatefulDistributedSampler"

    def set_epoch(self, epoch: int) -> None:
        # Prefer the sampler's epoch hook; fall back to the dataset's.
        if self.sampler is not None and hasattr(self.sampler, "set_epoch"):
            self.sampler.set_epoch(epoch)
        elif hasattr(self.dataset, "set_epoch"):
            self.dataset.set_epoch(epoch)


@dataclass
class BaseDataLoader:
    """Default DataLoader."""

    processor: Processor

    def __init__(self, dataset: TorchDataset) -> None:
        self.dataset = dataset

    # guidelines: fetch until a fixed batch size is reached.
    # save state_dict for the buffer.
    # resume with state
    # 1. Init stateful dataloader (tokenize)
    # 2. Add to buffer (2 * max seq len per device)
    # 3. Yield batch indexes (micro batch * grad acc)
    #    a) non pack + non dynamic
    #    b) non pack + dynamic
    #    c) pack + non dynamic
    #    d) pack + dynamic
    def init_dataloader(self) -> None:
        # init dataloader (not implemented yet)
        pass

    def __iter__(self) -> Iterator:
        pass

    def __next__(self) -> any:
        # NOTE(review): annotation uses the builtin `any`; probably typing.Any was meant.
        pass


@dataclass
class DataLoader:
    """Default DataLoader.

    Wraps an inner dataloader and yields lists of collated micro-batches
    (one list per optimizer step), optionally routing items through a
    batching queue for packing / dynamic batching. Supports save/resume
    via state_dict/load_state_dict.
    """

    processor: "Processor"
    dataloader: "DistributedDataloader"
    batching_queue: "BaseBatchingQueue"
    collate_fn: "DataCollator"
    num_micro_batch: int = 1
    length: int = 0
    drop_last: bool = True

    def __init__(
        self,
        dataloader: any,
        collate_fn: "DataCollator",
        num_micro_batch: int = 1,
        length: int = 0,
        drop_last: bool = True,
        batching_queue: Optional["BaseBatchingQueue"] = None,
    ) -> None:
        self.batching_queue = batching_queue
        self.num_micro_batch = num_micro_batch
        self.step = 0
        # Underscore-prefixed fields are transient and excluded from state_dict.
        self._collate_fn = collate_fn
        self._dataloader = dataloader
        self._drop_last = drop_last
        self._data_iter: Iterator
        self._resume = False
        self._batch_data_iter: Generator
        if length > 0:
            self._length = length
        elif length == -1:
            # -1 means "effectively infinite".
            self._length = sys.maxsize
        else:
            self._length = len(self._dataloader)

    def __len__(self):
        return self._length

    def __iter__(self) -> Iterator:
        # On a fresh (non-resumed) epoch, restart the inner iterator and the
        # batching generator; after a resume, keep the restored position.
        if not self._resume:
            self.step = 0
            self._data_iter = iter(self._dataloader)
            self._batch_data_iter = self.batch_data_generator()

        self._resume = False
        return self

    def __next__(self):
        return next(self._batch_data_iter)

    # FIXME maybe we can move origin_batch_data_generator to here
    def origin_batch_data_generator(self):
        """Standard pass-through generator if do not use batching queue."""
        while True:
            if self._length > 0 and self.step >= self._length:
                return

            try:
                batch = []
                data = next(self._data_iter)
                # split data into micro batches
                for i in range(0, len(data), self.num_micro_batch):
                    micro_batch = data[i : i + self.num_micro_batch]
                    if self._collate_fn:
                        micro_batch = self._collate_fn(micro_batch)

                    batch.append(micro_batch)

                yield batch
                self.step += 1
            except StopIteration:
                if self.step < self._length:
                    # Restart iterator to fill the requested length
                    self._data_iter = iter(self._dataloader)
                    try:
                        batch = []
                        data = next(self._data_iter)
                        for i in range(0, len(data), self.num_micro_batch):
                            micro_batch = data[i : i + self.num_micro_batch]
                            if self._collate_fn:
                                micro_batch = self._collate_fn(micro_batch)

                            batch.append(micro_batch)

                        yield batch
                        self.step += 1
                    except StopIteration:
                        return
                else:
                    return
            except Exception as e:
                logger.error(f"DataLoader origin_batch_data_generator exception: {e}")
                raise

    def batch_data_generator(self):
        # Without a batching queue, fall back to the pass-through generator.
        if self.batching_queue is None:
            yield from self.origin_batch_data_generator()
            return

        batch = []
        while True:
            if self._length and self.step >= self._length:
                return

            # Drain the queue first: emit micro-batches while it has enough data.
            if self.batching_queue.is_full_filled():
                micro_batch = self.batching_queue.get_micro_batch(self.step)
                if self._collate_fn:
                    micro_batch = self._collate_fn(micro_batch)

                batch.append(micro_batch)
                if len(batch) == self.num_micro_batch:
                    yield batch
                    self.step += 1
                    batch = []

            try:
                processing_item = next(self._data_iter)
            except Exception as e:
                # NOTE(review): StopIteration is handled inside a broad except;
                # any other exception is logged and re-raised below.
                if isinstance(e, StopIteration):
                    if self.step < self._length:
                        # call iter until reach length
                        self._data_iter = iter(self._dataloader)
                        processing_item = next(self._data_iter)
                    elif not self._drop_last and not self.batching_queue.empty():
                        # Flush the queue's remainder; the final partial step is
                        # topped up with padded copies of the last micro-batch.
                        while not self.batching_queue.empty():
                            micro_batch = self.batching_queue.get_micro_batch(self.step)
                            if self._collate_fn:
                                micro_batch = self._collate_fn(micro_batch)

                            batch.append(micro_batch)
                            if len(batch) == self.num_micro_batch:
                                yield batch
                                self.step += 1
                                batch = []

                        while len(batch) < self.num_micro_batch:
                            padding_batch = copy.deepcopy(micro_batch)
                            padding_batch["is_padded"] = True
                            batch.append(padding_batch)

                        yield batch
                        self.step += 1
                        return
                    else:
                        return
                else:
                    logger.error(f"DataLoader iter data exception: {e}")
                    raise

            # put processing_item to buffer
            if isinstance(processing_item, dict):
                processing_item = [processing_item]

            for item in processing_item:
                self.batching_queue.put_item(item)

    def state_dict(self):
        # save state
        state = self.__dict__.copy()
        # remove internal fields (underscore-prefixed transients)
        for k in list(state.keys()):
            if k.startswith("_"):
                del state[k]

        # save dataloader state
        if hasattr(self._dataloader, "state_dict"):
            state["dataloader_state"] = self._dataloader.state_dict()
        elif hasattr(self._dataloader, "__getstate__"):
            state["dataloader_state"] = self._dataloader.__getstate__()

        batching_strategy = getattr(self, "batching_strategy", None)
        if batching_strategy and hasattr(batching_strategy, "state_dict"):
            state["batching_strategy_state"] = batching_strategy.state_dict()

        if "batching_strategy" in state:
            del state["batching_strategy"]

        return copy.deepcopy(state)

    def load_state_dict(self, state: dict[str, any]):
        # A changed micro-batch count invalidates the saved prefetch buffer.
        if state["num_micro_batch"] != self.num_micro_batch:
            logger.warning(
                f"num_micro_batch changed: [ {state['num_micro_batch']} -> {self.num_micro_batch} ], will clear prefetch buffer"
            )
            del state["num_micro_batch"]

        self.__dict__.update(state)
        self._resume = True
        if hasattr(self._dataloader, "load_state_dict"):
            self._dataloader.load_state_dict(state["dataloader_state"])
        elif hasattr(self._dataloader, "__getstate__"):
            # NOTE(review): the hasattr check is for __getstate__ but the call
            # is __setstate__ — confirm this asymmetry is intended.
            self._dataloader.__setstate__(state["dataloader_state"])

        if "batching_strategy_state" in state:
            batching_strategy = getattr(self, "batching_strategy", None)
            if batching_strategy:
                batching_strategy.load_state_dict(state["batching_strategy_state"])

            del state["batching_strategy_state"]

        self._data_iter = iter(self._dataloader)
        self._batch_data_iter = self.batch_data_generator()

    def set_epoch(self, epoch: int) -> None:
        if hasattr(self._dataloader, "set_epoch"):
            self._dataloader.set_epoch(epoch)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/trainer_utils/callback.py
src/llamafactory/v1/core/trainer_utils/callback.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/trainer_utils/__init__.py
src/llamafactory/v1/core/trainer_utils/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/trainer_utils/data_collator.py
src/llamafactory/v1/core/trainer_utils/data_collator.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any

import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data._utils.collate import default_collate

from ....extras.constants import IGNORE_INDEX
from ...plugins.data_plugins.template import Template
from ...utils.types import Processor, Tensor


def len2culen(seqlens: "torch.Tensor") -> "torch.Tensor":  # FIXME move to utils
    """Convert sequence lengths to cumulative sequence lengths.

    E.g. ``[2, 3, 1]`` -> ``[0, 2, 5, 6]`` (int32), the ``cu_seqlens`` layout used by
    variable-length attention kernels.
    """
    return F.pad(torch.cumsum(seqlens, dim=0), (1, 0)).type(torch.int32)


@dataclass  # decorated so the base collator is directly instantiable and __post_init__ runs
class DataCollator:
    """Default Data collator.

    Pads variable-length per-sample tensors into a dense batch. ``input_ids`` are padded
    with the tokenizer's pad token, ``attention_mask``/``position_ids`` with 0, ``labels``
    with ``IGNORE_INDEX``; every other key goes through ``default_collate``.
    """

    processor: "Processor"  # processor name -> map to encode_messages function

    def __post_init__(self):
        # callback for text tokenizer: a bare tokenizer may be passed instead of a processor
        self.tokenizer = self.processor.tokenizer if hasattr(self.processor, "tokenizer") else self.processor

    def __call__(self, features: list[dict[str, Any]]) -> dict[str, "Tensor"]:
        """Collate features into a batch."""
        batch = defaultdict(list)
        # batching features: transpose list-of-dicts into dict-of-lists
        for feature in features:
            for key in feature.keys():
                batch[key].append(feature[key])

        for key in batch.keys():  # process padding features
            if key in ["input_ids", "attention_mask", "position_ids"]:
                padding_value = self.tokenizer.pad_token_id if key == "input_ids" else 0
                batch[key] = pad_sequence(batch[key], batch_first=True, padding_value=padding_value)
            elif key in ["labels"]:
                batch[key] = pad_sequence(batch[key], batch_first=True, padding_value=IGNORE_INDEX)
            else:
                batch[key] = default_collate(batch[key])

        return batch


# sft: messages
# dpo: chosen_messages, rejected_messages
@dataclass
class DefaultCollator(DataCollator):
    """Example for now.

    Accepts either raw chat messages (encoded via the template) or pre-tokenized
    features (dicts already containing ``input_ids``), then defers padding to the base.
    """

    processor: "Processor"  # processor name -> map to encode_messages function
    template: "Template"

    def __call__(self, messages: list[list[dict[str, Any]]]) -> dict[str, "Tensor"]:
        features = []
        # Check if data is already tokenized (contains input_ids)
        if messages and isinstance(messages[0], dict) and "input_ids" in messages[0]:
            for feature in messages:
                if not isinstance(feature, dict):
                    raise ValueError(f"Expected dict but got {type(feature)}")

                tensor_feature = {
                    k: torch.tensor(v, dtype=torch.long) if not isinstance(v, torch.Tensor) else v
                    for k, v in feature.items()
                }
                features.append(tensor_feature)
        else:  # raw messages need to be encoded
            for message in messages:
                encoded_message = self.template.encode_messages(self.tokenizer, message)
                encoded_message = {k: torch.tensor(v, dtype=torch.long) for k, v in encoded_message.items()}
                features.append(encoded_message)

        return super().__call__(features)


@dataclass
class PairwiseCollator(DataCollator):
    # placeholder for preference-pair collation (e.g. DPO); not implemented yet
    pass


@dataclass
class DataCollatorWithPacking(DefaultCollator):
    """Data collator with packing.

    Concatenates all sequences into one packed row and emits ``cu_seqlens`` boundaries
    instead of padding.
    """

    processor: "Processor"
    template: "Template"

    def __call__(self, features: Sequence[dict[str, "torch.Tensor"]]) -> dict[str, "torch.Tensor"]:
        seqlens = torch.tensor([len(feature["input_ids"]) for feature in features], dtype=torch.long)
        batch = {"cu_seqlens": len2culen(seqlens)}
        for input_name in features[0].keys():
            if input_name in ("input_ids", "attention_mask", "labels"):
                # packed: concatenate along the sequence dimension, no padding
                batch[input_name] = torch.cat([feature[input_name] for feature in features])
            else:
                batch[input_name] = default_collate([feature[input_name] for feature in features])

        return batch
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/core/trainer_utils/lr_scheduler.py
src/llamafactory/v1/core/trainer_utils/lr_scheduler.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/data_args.py
src/llamafactory/v1/config/data_args.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field @dataclass class DataArguments: dataset: str | None = field( default=None, metadata={"help": "Path to the dataset."}, ) cutoff_len: int = field( default=2048, metadata={"help": "Cutoff length for the dataset."}, )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/sample_args.py
src/llamafactory/v1/config/sample_args.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from .arg_utils import SampleBackend @dataclass class SampleArguments: sample_backend: SampleBackend = field( default=SampleBackend.HF, metadata={"help": "Sampling backend, default to 'hf'."}, ) max_new_tokens: int = field( default=128, metadata={"help": "Maximum number of new tokens to generate."}, )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/training_args.py
src/llamafactory/v1/config/training_args.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from dataclasses import dataclass, field
from uuid import uuid4

from .arg_utils import PluginConfig, get_plugin_config


@dataclass
class TrainingArguments:
    """Optimization-loop arguments (batching, learning rate, precision, distribution)."""

    output_dir: str = field(
        # A factory is required here: a plain `default=` is evaluated once at import
        # time, so every TrainingArguments instance in the process would share the
        # same "unique" run directory. `uuid4().hex` is already a str.
        default_factory=lambda: os.path.join("outputs", uuid4().hex),
        metadata={"help": "Path to the output directory."},
    )
    micro_batch_size: int = field(
        default=1,
        metadata={"help": "Micro batch size for training."},
    )
    global_batch_size: int = field(
        default=1,
        metadata={"help": "Global batch size for training."},
    )
    learning_rate: float = field(
        default=1e-4,
        metadata={"help": "Learning rate for training."},
    )
    bf16: bool = field(
        default=False,
        metadata={"help": "Use bf16 for training."},
    )
    dist_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Distribution configuration for training."},
    )

    def __post_init__(self) -> None:
        # normalize the raw plugin argument (str / dict / None) into a PluginConfig
        self.dist_config = get_plugin_config(self.dist_config)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/model_args.py
src/llamafactory/v1/config/model_args.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

from .arg_utils import ModelClass, PluginConfig, get_plugin_config


@dataclass
class ModelArguments:
    """Arguments describing the model to load and its optional plugin configurations."""

    model: str = field(
        metadata={"help": "Path to the model or model identifier from Hugging Face."},
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={"help": "Trust remote code from Hugging Face."},
    )
    model_class: ModelClass = field(
        default=ModelClass.LLM,
        metadata={"help": "Model class from Hugging Face."},
    )
    init_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Initialization configuration for the model."},
    )
    peft_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "PEFT configuration for the model."},
    )
    kernel_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Kernel configuration for the model."},
    )
    quant_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Quantization configuration for the model."},
    )

    def __post_init__(self) -> None:
        # normalize every raw plugin argument (str / dict / None) into a PluginConfig
        for config_attr in ("init_config", "peft_config", "kernel_config", "quant_config"):
            setattr(self, config_attr, get_plugin_config(getattr(self, config_attr)))
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/arg_utils.py
src/llamafactory/v1/config/arg_utils.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/training_args.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from enum import Enum, unique class PluginConfig(dict): """Dictionary that allows attribute access.""" @property def name(self) -> str: """Plugin name.""" if "name" not in self: raise ValueError("Plugin configuration must have a 'name' field.") return self["name"] PluginArgument = PluginConfig | dict | str | None @unique class ModelClass(str, Enum): """Auto class for model config.""" LLM = "llm" CLS = "cls" OTHER = "other" @unique class SampleBackend(str, Enum): HF = "hf" VLLM = "vllm" def _convert_str_dict(data: dict) -> dict: """Parse string representation inside the dictionary. Args: data: The string or dictionary to convert. Returns: The converted dictionary. """ for key, value in data.items(): if isinstance(value, dict): data[key] = _convert_str_dict(value) elif isinstance(value, str): if value.lower() in ("true", "false"): data[key] = value.lower() == "true" elif value.isdigit(): data[key] = int(value) elif value.replace(".", "", 1).isdigit(): data[key] = float(value) return data def get_plugin_config(config: PluginArgument) -> PluginConfig | None: """Get the plugin configuration from the argument value. Args: config: The argument value to get the plugin configuration from. 
Returns: The plugin configuration. """ if config is None: return None if isinstance(config, str) and config.startswith("{"): config = json.loads(config) config = _convert_str_dict(config) if "name" not in config: raise ValueError("Plugin configuration must have a 'name' field.") return PluginConfig(config)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/__init__.py
src/llamafactory/v1/config/__init__.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .arg_parser import InputArgument, get_args from .arg_utils import ModelClass, SampleBackend from .data_args import DataArguments from .model_args import ModelArguments from .sample_args import SampleArguments from .training_args import TrainingArguments __all__ = [ "DataArguments", "InputArgument", "ModelArguments", "ModelClass", "SampleArguments", "SampleBackend", "TrainingArguments", "get_args", ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/v1/config/arg_parser.py
src/llamafactory/v1/config/arg_parser.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import sys
from pathlib import Path
from typing import Any

from omegaconf import OmegaConf
from transformers import HfArgumentParser

from ...extras.misc import is_env_enabled
from .data_args import DataArguments
from .model_args import ModelArguments
from .sample_args import SampleArguments
from .training_args import TrainingArguments


# raw argument forms accepted by get_args: parsed dict, argv-style list, or None (use sys.argv)
InputArgument = dict[str, Any] | list[str] | None


def validate_args(
    data_args: DataArguments,
    model_args: ModelArguments,
    training_args: TrainingArguments,
    sample_args: SampleArguments,
):
    """Validate arguments.

    Raises:
        ValueError: If quantization is combined with the deepspeed distribution backend.
    """
    if (
        model_args.quant_config is not None
        and training_args.dist_config is not None
        and training_args.dist_config.name == "deepspeed"
    ):
        raise ValueError("Quantization is not supported with deepspeed backend.")


def get_args(args: InputArgument = None) -> tuple[DataArguments, ModelArguments, TrainingArguments, SampleArguments]:
    """Parse arguments from command line or config file.

    Supports three invocation styles: a YAML/JSON config file path as the first CLI
    argument (with `key=value` CLI overrides merged on top), an explicit dict, or an
    argv-style list of strings.
    """
    parser = HfArgumentParser([DataArguments, ModelArguments, TrainingArguments, SampleArguments])
    allow_extra_keys = is_env_enabled("ALLOW_EXTRA_KEYS")
    if args is None:
        if len(sys.argv) > 1 and (sys.argv[1].endswith(".yaml") or sys.argv[1].endswith(".yml")):
            override_config = OmegaConf.from_cli(sys.argv[2:])
            dict_config = OmegaConf.load(Path(sys.argv[1]).absolute())
            args = OmegaConf.to_container(OmegaConf.merge(dict_config, override_config))
        elif len(sys.argv) > 1 and sys.argv[1].endswith(".json"):
            override_config = OmegaConf.from_cli(sys.argv[2:])
            # json.load() requires a file object, not a Path (Path has no .read()),
            # so read the file's text and parse it with json.loads() instead.
            dict_config = OmegaConf.create(json.loads(Path(sys.argv[1]).absolute().read_text()))
            args = OmegaConf.to_container(OmegaConf.merge(dict_config, override_config))
        else:  # list of strings
            args = sys.argv[1:]

    if isinstance(args, dict):
        (*parsed_args,) = parser.parse_dict(args, allow_extra_keys=allow_extra_keys)
    else:
        (*parsed_args, unknown_args) = parser.parse_args_into_dataclasses(args, return_remaining_strings=True)
        if unknown_args and not allow_extra_keys:
            print(parser.format_help())
            print(f"Got unknown args, potentially deprecated arguments: {unknown_args}")
            raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {unknown_args}")

    validate_args(*parsed_args)
    return tuple(parsed_args)


if __name__ == "__main__":
    print(get_args())
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/api/common.py
src/llamafactory/api/common.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ipaddress
import json
import os
import socket
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse

from ..extras.misc import is_env_enabled
from ..extras.packages import is_fastapi_available


if is_fastapi_available():
    from fastapi import HTTPException, status


if TYPE_CHECKING:
    from pydantic import BaseModel


# directory that local media files must live under to be served
SAFE_MEDIA_PATH = os.environ.get("SAFE_MEDIA_PATH", os.path.join(os.path.dirname(__file__), "safe_media"))
ALLOW_LOCAL_FILES = is_env_enabled("ALLOW_LOCAL_FILES", "1")


def dictify(data: "BaseModel") -> dict[str, Any]:
    """Convert a pydantic model to a plain dict, compatible with pydantic v1 and v2."""
    try:  # pydantic v2
        return data.model_dump(exclude_unset=True)
    except AttributeError:  # pydantic v1
        return data.dict(exclude_unset=True)


def jsonify(data: "BaseModel") -> str:
    """Serialize a pydantic model to a JSON string, compatible with pydantic v1 and v2."""
    try:  # pydantic v2
        return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)
    except AttributeError:  # pydantic v1
        return data.json(exclude_unset=True, ensure_ascii=False)


def check_lfi_path(path: str) -> None:
    """Checks if a given path is vulnerable to LFI. Raises HTTPException if unsafe."""
    if not ALLOW_LOCAL_FILES:
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Local file access is disabled.")

    try:
        os.makedirs(SAFE_MEDIA_PATH, exist_ok=True)
        real_path = os.path.realpath(path)
        safe_path = os.path.realpath(SAFE_MEDIA_PATH)
        # Compare against the directory boundary, not a raw string prefix:
        # "/safe_media_evil" must NOT pass a check for "/safe_media".
        if real_path != safe_path and not real_path.startswith(safe_path + os.sep):
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN, detail="File access is restricted to the safe media directory."
            )
    except HTTPException:
        # re-raise the deliberate 403 above instead of masking it as a 400
        raise
    except Exception:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid or inaccessible file path.")


def check_ssrf_url(url: str) -> None:
    """Checks if a given URL is vulnerable to SSRF. Raises HTTPException if unsafe."""
    try:
        parsed_url = urlparse(url)
        if parsed_url.scheme not in ["http", "https"]:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only HTTP/HTTPS URLs are allowed.")

        hostname = parsed_url.hostname
        if not hostname:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid URL hostname.")

        ip_info = socket.getaddrinfo(hostname, parsed_url.port)
        ip_address_str = ip_info[0][4][0]
        ip = ipaddress.ip_address(ip_address_str)
        if not ip.is_global:
            # blocks loopback, RFC1918, link-local, and other non-global ranges
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Access to private or reserved IP addresses is not allowed.",
            )
    except HTTPException:
        # keep the specific 400/403 raised above instead of rewrapping it as a generic 400
        raise
    except socket.gaierror:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail=f"Could not resolve hostname: {parsed_url.hostname}"
        )
    except Exception as e:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid URL: {e}")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/api/chat.py
src/llamafactory/api/chat.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import io
import json
import os
import re
import uuid
from collections.abc import AsyncGenerator
from typing import TYPE_CHECKING, Optional

from ..data import Role as DataRole
from ..extras import logging
from ..extras.constants import AUDIO_PLACEHOLDER, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
from ..extras.misc import is_env_enabled
from ..extras.packages import is_fastapi_available, is_pillow_available, is_requests_available
from .common import check_lfi_path, check_ssrf_url, dictify, jsonify
from .protocol import (
    ChatCompletionMessage,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatCompletionResponseUsage,
    ChatCompletionStreamResponse,
    ChatCompletionStreamResponseChoice,
    Finish,
    Function,
    FunctionCall,
    Role,
    ScoreEvaluationResponse,
)


if is_fastapi_available():
    from fastapi import HTTPException, status


if is_pillow_available():
    from PIL import Image


if is_requests_available():
    import requests


if TYPE_CHECKING:
    from ..chat import ChatModel
    from ..data.mm_plugin import AudioInput, ImageInput, VideoInput
    from .protocol import ChatCompletionRequest, ScoreEvaluationRequest


logger = logging.get_logger(__name__)
# maps OpenAI-style API roles onto the internal data roles (API "tool" -> observation)
ROLE_MAPPING = {
    Role.USER: DataRole.USER.value,
    Role.ASSISTANT: DataRole.ASSISTANT.value,
    Role.SYSTEM: DataRole.SYSTEM.value,
    Role.FUNCTION: DataRole.FUNCTION.value,
    Role.TOOL: DataRole.OBSERVATION.value,
}


def _process_request(
    request: "ChatCompletionRequest",
) -> tuple[
    list[dict[str, str]],
    Optional[str],
    Optional[str],
    Optional[list["ImageInput"]],
    Optional[list["VideoInput"]],
    Optional[list["AudioInput"]],
]:
    """Translate an OpenAI-style chat request into the internal call arguments.

    Returns a tuple of (messages, system, tools, images, videos, audios). A leading
    system message is popped off and returned separately; the remaining messages must
    strictly alternate user/tool and assistant/function roles (odd count enforced).
    Multimodal items are fetched from base64 data URIs, local files (LFI-checked), or
    web URLs (SSRF-checked), and each is replaced by its placeholder in the text.

    Raises:
        HTTPException: 400 for empty/ill-formed message sequences, invalid roles,
            unknown input item types, or invalid tool definitions.
    """
    if is_env_enabled("API_VERBOSE", "1"):
        logger.info_rank0(f"==== request ====\n{json.dumps(dictify(request), indent=2, ensure_ascii=False)}")

    if len(request.messages) == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length")

    if request.messages[0].role == Role.SYSTEM:
        content = request.messages.pop(0).content
        # system content may be plain text or a multimodal list; only its first text part is used
        system = content[0].text if isinstance(content, list) else content
    else:
        system = None

    if len(request.messages) % 2 == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...")

    input_messages = []
    images, videos, audios = [], [], []
    for i, message in enumerate(request.messages):
        # even positions must be user/tool turns, odd positions assistant/function turns
        if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")
        elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")

        if message.role == Role.ASSISTANT and isinstance(message.tool_calls, list) and len(message.tool_calls):
            # assistant tool calls are serialized to JSON and stored as a function-role turn
            tool_calls = [
                {"name": tool_call.function.name, "arguments": tool_call.function.arguments}
                for tool_call in message.tool_calls
            ]
            content = json.dumps(tool_calls, ensure_ascii=False)
            input_messages.append({"role": ROLE_MAPPING[Role.FUNCTION], "content": content})
        elif isinstance(message.content, list):
            # multimodal content: concatenate text parts, replace media with placeholders
            text_content = ""
            for input_item in message.content:
                if input_item.type == "text":
                    text_content += input_item.text
                elif input_item.type == "image_url":
                    text_content += IMAGE_PLACEHOLDER
                    image_url = input_item.image_url.url
                    if re.match(r"^data:image\/(png|jpg|jpeg|gif|bmp);base64,(.+)$", image_url):  # base64 image
                        image_stream = io.BytesIO(base64.b64decode(image_url.split(",", maxsplit=1)[1]))
                    elif os.path.isfile(image_url):  # local file
                        check_lfi_path(image_url)
                        image_stream = open(image_url, "rb")
                    else:  # web uri
                        check_ssrf_url(image_url)
                        image_stream = requests.get(image_url, stream=True).raw

                    images.append(Image.open(image_stream).convert("RGB"))
                elif input_item.type == "video_url":
                    text_content += VIDEO_PLACEHOLDER
                    video_url = input_item.video_url.url
                    if re.match(r"^data:video\/(mp4|mkv|avi|mov);base64,(.+)$", video_url):  # base64 video
                        video_stream = io.BytesIO(base64.b64decode(video_url.split(",", maxsplit=1)[1]))
                    elif os.path.isfile(video_url):  # local file
                        check_lfi_path(video_url)
                        # NOTE: local videos are passed as a path, not an open stream
                        video_stream = video_url
                    else:  # web uri
                        check_ssrf_url(video_url)
                        video_stream = requests.get(video_url, stream=True).raw

                    videos.append(video_stream)
                elif input_item.type == "audio_url":
                    text_content += AUDIO_PLACEHOLDER
                    audio_url = input_item.audio_url.url
                    if re.match(r"^data:audio\/(mpeg|mp3|wav|ogg);base64,(.+)$", audio_url):  # base64 audio
                        audio_stream = io.BytesIO(base64.b64decode(audio_url.split(",", maxsplit=1)[1]))
                    elif os.path.isfile(audio_url):  # local file
                        check_lfi_path(audio_url)
                        # NOTE: local audios are passed as a path, not an open stream
                        audio_stream = audio_url
                    else:  # web uri
                        check_ssrf_url(audio_url)
                        audio_stream = requests.get(audio_url, stream=True).raw

                    audios.append(audio_stream)
                else:
                    raise HTTPException(
                        status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid input type {input_item.type}."
                    )

            input_messages.append({"role": ROLE_MAPPING[message.role], "content": text_content})
        else:
            input_messages.append({"role": ROLE_MAPPING[message.role], "content": message.content})

    tool_list = request.tools
    if isinstance(tool_list, list) and len(tool_list):
        try:
            tools = json.dumps([dictify(tool.function) for tool in tool_list], ensure_ascii=False)
        except json.JSONDecodeError:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools")
    else:
        tools = None

    return input_messages, system, tools, images or None, videos or None, audios or None


def _create_stream_chat_completion_chunk(
    completion_id: str,
    model: str,
    delta: "ChatCompletionMessage",
    index: Optional[int] = 0,
    finish_reason: Optional["Finish"] = None,
) -> str:
    """Build one SSE chunk (as a JSON string) for the streaming chat endpoint."""
    choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason)
    chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data])
    return jsonify(chunk)


async def create_chat_completion_response(
    request: "ChatCompletionRequest", chat_model: "ChatModel"
) -> "ChatCompletionResponse":
    """Run a non-streaming chat completion and assemble the OpenAI-style response.

    When tools are supplied, the template's tool extractor is applied to each response;
    a list result is converted into tool calls with ``finish_reason`` "tool_calls".
    """
    completion_id = f"chatcmpl-{uuid.uuid4().hex}"
    input_messages, system, tools, images, videos, audios = _process_request(request)
    responses = await chat_model.achat(
        input_messages,
        system,
        tools,
        images,
        videos,
        audios,
        do_sample=request.do_sample,
        temperature=request.temperature,
        top_p=request.top_p,
        max_new_tokens=request.max_tokens,
        num_return_sequences=request.n,
        repetition_penalty=request.presence_penalty,
        stop=request.stop,
    )

    prompt_length, response_length = 0, 0
    choices = []
    for i, response in enumerate(responses):
        if tools:
            result = chat_model.engine.template.extract_tool(response.response_text)
        else:
            result = response.response_text

        if isinstance(result, list):  # extractor returned parsed tool calls
            tool_calls = []
            for tool in result:
                function = Function(name=tool.name, arguments=tool.arguments)
                tool_calls.append(FunctionCall(id=f"call_{uuid.uuid4().hex}", function=function))

            response_message = ChatCompletionMessage(role=Role.ASSISTANT, tool_calls=tool_calls)
            finish_reason = Finish.TOOL
        else:
            response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result)
            finish_reason = Finish.STOP if response.finish_reason == "stop" else Finish.LENGTH

        choices.append(ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason))
        # prompt length is identical across n samples; completion lengths accumulate
        prompt_length = response.prompt_length
        response_length += response.response_length

    usage = ChatCompletionResponseUsage(
        prompt_tokens=prompt_length,
        completion_tokens=response_length,
        total_tokens=prompt_length + response_length,
    )

    return ChatCompletionResponse(id=completion_id, model=request.model, choices=choices, usage=usage)


async def create_stream_chat_completion_response(
    request: "ChatCompletionRequest", chat_model: "ChatModel"
) -> AsyncGenerator[str, None]:
    """Stream chat completion chunks, ending with a stop chunk and the "[DONE]" sentinel.

    Raises:
        HTTPException: 400 when tools are requested or n > 1 (unsupported in streaming).
    """
    completion_id = f"chatcmpl-{uuid.uuid4().hex}"
    input_messages, system, tools, images, videos, audios = _process_request(request)
    if tools:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.")

    if request.n > 1:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream multiple responses.")

    # initial chunk carries the assistant role with empty content, per OpenAI convention
    yield _create_stream_chat_completion_chunk(
        completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(role=Role.ASSISTANT, content="")
    )
    async for new_token in chat_model.astream_chat(
        input_messages,
        system,
        tools,
        images,
        videos,
        audios,
        do_sample=request.do_sample,
        temperature=request.temperature,
        top_p=request.top_p,
        max_new_tokens=request.max_tokens,
        repetition_penalty=request.presence_penalty,
        stop=request.stop,
    ):
        if len(new_token) != 0:
            yield _create_stream_chat_completion_chunk(
                completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token)
            )

    yield _create_stream_chat_completion_chunk(
        completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(), finish_reason=Finish.STOP
    )
    yield "[DONE]"


async def create_score_evaluation_response(
    request: "ScoreEvaluationRequest", chat_model: "ChatModel"
) -> "ScoreEvaluationResponse":
    """Score the requested messages with the model and wrap the result.

    Raises:
        HTTPException: 400 when the message list is empty.
    """
    score_id = f"scoreval-{uuid.uuid4().hex}"
    if len(request.messages) == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request")

    scores = await chat_model.aget_scores(request.messages, max_length=request.max_length)
    return ScoreEvaluationResponse(id=score_id, model=request.model, scores=scores)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/api/__init__.py
src/llamafactory/api/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/api/protocol.py
src/llamafactory/api/protocol.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# OpenAI-compatible API schema. NOTE(review): docstrings are deliberately avoided on
# the pydantic models below — pydantic can surface class docstrings as schema
# descriptions, which would change the served OpenAPI document.

import time
from enum import Enum, unique
from typing import Any, Literal

from pydantic import BaseModel, Field


# chat message roles accepted by the API
@unique
class Role(str, Enum):
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    FUNCTION = "function"
    TOOL = "tool"


# finish reasons reported in responses ("tool_calls" matches the OpenAI wire value)
@unique
class Finish(str, Enum):
    STOP = "stop"
    LENGTH = "length"
    TOOL = "tool_calls"


# entry of the /v1/models listing
class ModelCard(BaseModel):
    id: str
    object: Literal["model"] = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: Literal["owner"] = "owner"


class ModelList(BaseModel):
    object: Literal["list"] = "list"
    data: list[ModelCard] = []


# a concrete function invocation: name plus JSON-encoded arguments string
class Function(BaseModel):
    name: str
    arguments: str


# a tool's declared interface (JSON-schema parameters)
class FunctionDefinition(BaseModel):
    name: str
    description: str
    parameters: dict[str, Any]


class FunctionAvailable(BaseModel):
    type: Literal["function", "code_interpreter"] = "function"
    function: FunctionDefinition | None = None


class FunctionCall(BaseModel):
    id: str
    type: Literal["function"] = "function"
    function: Function


# URL wrapper used by multimodal content items
class URL(BaseModel):
    url: str
    detail: Literal["auto", "low", "high"] = "auto"


# one part of a multimodal message; exactly one payload field is expected per `type`
class MultimodalInputItem(BaseModel):
    type: Literal["text", "image_url", "video_url", "audio_url"]
    text: str | None = None
    image_url: URL | None = None
    video_url: URL | None = None
    audio_url: URL | None = None


# incoming message: content is plain text or a list of multimodal parts
class ChatMessage(BaseModel):
    role: Role
    content: str | list[MultimodalInputItem] | None = None
    tool_calls: list[FunctionCall] | None = None


# outgoing message/delta: all fields optional so it can serve as a stream delta
class ChatCompletionMessage(BaseModel):
    role: Role | None = None
    content: str | None = None
    tool_calls: list[FunctionCall] | None = None


class ChatCompletionRequest(BaseModel):
    model: str
    messages: list[ChatMessage]
    tools: list[FunctionAvailable] | None = None
    do_sample: bool | None = None
    temperature: float | None = None
    top_p: float | None = None
    n: int = 1  # number of completions to return
    presence_penalty: float | None = None
    max_tokens: int | None = None
    stop: str | list[str] | None = None
    stream: bool = False


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatCompletionMessage
    finish_reason: Finish


class ChatCompletionStreamResponseChoice(BaseModel):
    index: int
    delta: ChatCompletionMessage
    finish_reason: Finish | None = None


# token accounting attached to non-streaming responses
class ChatCompletionResponseUsage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatCompletionResponse(BaseModel):
    id: str
    object: Literal["chat.completion"] = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[ChatCompletionResponseChoice]
    usage: ChatCompletionResponseUsage


class ChatCompletionStreamResponse(BaseModel):
    id: str
    object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[ChatCompletionStreamResponseChoice]


class ScoreEvaluationRequest(BaseModel):
    model: str
    messages: list[str]
    max_length: int | None = None


class ScoreEvaluationResponse(BaseModel):
    id: str
    object: Literal["score.evaluation"] = "score.evaluation"
    model: str
    scores: list[float]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/api/app.py
src/llamafactory/api/app.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import os from contextlib import asynccontextmanager from functools import partial from typing import Annotated from ..chat import ChatModel from ..extras.constants import EngineName from ..extras.misc import torch_gc from ..extras.packages import is_fastapi_available, is_starlette_available, is_uvicorn_available from .chat import ( create_chat_completion_response, create_score_evaluation_response, create_stream_chat_completion_response, ) from .protocol import ( ChatCompletionRequest, ChatCompletionResponse, ModelCard, ModelList, ScoreEvaluationRequest, ScoreEvaluationResponse, ) if is_fastapi_available(): from fastapi import Depends, FastAPI, HTTPException, status from fastapi.middleware.cors import CORSMiddleware from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer if is_starlette_available(): from sse_starlette import EventSourceResponse if is_uvicorn_available(): import uvicorn async def sweeper() -> None: while True: torch_gc() await asyncio.sleep(300) @asynccontextmanager async def lifespan(app: "FastAPI", chat_model: "ChatModel"): # collects GPU memory if chat_model.engine.name == EngineName.HF: asyncio.create_task(sweeper()) yield torch_gc() def create_app(chat_model: "ChatModel") -> "FastAPI": root_path = os.getenv("FASTAPI_ROOT_PATH", "") app = FastAPI(lifespan=partial(lifespan, chat_model=chat_model), root_path=root_path) app.add_middleware( 
CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) api_key = os.getenv("API_KEY") security = HTTPBearer(auto_error=False) async def verify_api_key(auth: Annotated[HTTPAuthorizationCredentials | None, Depends(security)]): if api_key and (auth is None or auth.credentials != api_key): raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key.") @app.get( "/v1/models", response_model=ModelList, status_code=status.HTTP_200_OK, dependencies=[Depends(verify_api_key)], ) async def list_models(): model_card = ModelCard(id=os.getenv("API_MODEL_NAME", "gpt-3.5-turbo")) return ModelList(data=[model_card]) @app.post( "/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK, dependencies=[Depends(verify_api_key)], ) async def create_chat_completion(request: ChatCompletionRequest): if not chat_model.engine.can_generate: raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed") if request.stream: generate = create_stream_chat_completion_response(request, chat_model) return EventSourceResponse(generate, media_type="text/event-stream", sep="\n") else: return await create_chat_completion_response(request, chat_model) @app.post( "/v1/score/evaluation", response_model=ScoreEvaluationResponse, status_code=status.HTTP_200_OK, dependencies=[Depends(verify_api_key)], ) async def create_score_evaluation(request: ScoreEvaluationRequest): if chat_model.engine.can_generate: raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed") return await create_score_evaluation_response(request, chat_model) return app def run_api() -> None: chat_model = ChatModel() app = create_app(chat_model) api_host = os.getenv("API_HOST", "0.0.0.0") api_port = int(os.getenv("API_PORT", "8000")) print(f"Visit http://localhost:{api_port}/docs for API document.") uvicorn.run(app, host=api_host, port=api_port)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/generating_args.py
src/llamafactory/hparams/generating_args.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import asdict, dataclass, field from typing import Any from transformers import GenerationConfig @dataclass class GeneratingArguments: r"""Arguments pertaining to specify the decoding parameters.""" do_sample: bool = field( default=True, metadata={"help": "Whether or not to use sampling, use greedy decoding otherwise."}, ) temperature: float = field( default=0.95, metadata={"help": "The value used to modulate the next token probabilities."}, ) top_p: float = field( default=0.7, metadata={ "help": ( "The smallest set of most probable tokens with probabilities that add up to top_p or higher are kept." ) }, ) top_k: int = field( default=50, metadata={"help": "The number of highest probability vocabulary tokens to keep for top-k filtering."}, ) num_beams: int = field( default=1, metadata={"help": "Number of beams for beam search. 1 means no beam search."}, ) max_length: int = field( default=1024, metadata={"help": "The maximum length the generated tokens can have. It can be overridden by max_new_tokens."}, ) max_new_tokens: int = field( default=1024, metadata={"help": "The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt."}, ) repetition_penalty: float = field( default=1.0, metadata={"help": "The parameter for repetition penalty. 
1.0 means no penalty."}, ) length_penalty: float = field( default=1.0, metadata={"help": "Exponential penalty to the length that is used with beam-based generation."}, ) skip_special_tokens: bool = field( default=True, metadata={"help": "Whether or not to remove special tokens in the decoding."}, ) def to_dict(self, obey_generation_config: bool = False) -> dict[str, Any]: args = asdict(self) if args.get("max_new_tokens", -1) > 0: args.pop("max_length", None) else: args.pop("max_new_tokens", None) if obey_generation_config: generation_config = GenerationConfig() for key in list(args.keys()): if not hasattr(generation_config, key): args.pop(key) return args
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/data_args.py
src/llamafactory/hparams/data_args.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import asdict, dataclass, field from typing import Any, Literal @dataclass class DataArguments: r"""Arguments pertaining to what data we are going to input our model for training and evaluation.""" template: str | None = field( default=None, metadata={"help": "Which template to use for constructing prompts in training and inference."}, ) dataset: str | None = field( default=None, metadata={"help": "The name of dataset(s) to use for training. Use commas to separate multiple datasets."}, ) eval_dataset: str | None = field( default=None, metadata={"help": "The name of dataset(s) to use for evaluation. Use commas to separate multiple datasets."}, ) dataset_dir: str = field( default="data", metadata={"help": "Path to the folder containing the datasets."}, ) media_dir: str | None = field( default=None, metadata={"help": "Path to the folder containing the images, videos or audios. 
Defaults to `dataset_dir`."}, ) cutoff_len: int = field( default=2048, metadata={"help": "The cutoff length of the tokenized inputs in the dataset."}, ) train_on_prompt: bool = field( default=False, metadata={"help": "Whether or not to disable the mask on the prompt."}, ) mask_history: bool = field( default=False, metadata={"help": "Whether or not to mask the history and train on the last turn only."}, ) streaming: bool = field( default=False, metadata={"help": "Enable dataset streaming."}, ) buffer_size: int = field( default=16384, metadata={"help": "Size of the buffer to randomly sample examples from in dataset streaming."}, ) mix_strategy: Literal["concat", "interleave_under", "interleave_over"] = field( default="concat", metadata={"help": "Strategy to use in dataset mixing (concat/interleave) (undersampling/oversampling)."}, ) interleave_probs: str | None = field( default=None, metadata={"help": "Probabilities to sample data from datasets. Use commas to separate multiple datasets."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets."}, ) preprocessing_batch_size: int = field( default=1000, metadata={"help": "The number of examples in one group in pre-processing."}, ) preprocessing_num_workers: int | None = field( default=None, metadata={"help": "The number of processes to use for the pre-processing."}, ) max_samples: int | None = field( default=None, metadata={"help": "For debugging purposes, truncate the number of examples for each dataset."}, ) eval_num_beams: int | None = field( default=None, metadata={"help": "Number of beams to use for evaluation. 
This argument will be passed to `model.generate`"}, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={"help": "Whether or not to ignore the tokens corresponding to the pad label in loss computation."}, ) val_size: float = field( default=0.0, metadata={"help": "Size of the validation set, should be an integer or a float in range `[0,1)`."}, ) eval_on_each_dataset: bool = field( default=False, metadata={"help": "Whether or not to evaluate on each dataset separately."}, ) packing: bool | None = field( default=None, metadata={"help": "Enable sequences packing in training. Will automatically enable in pre-training."}, ) neat_packing: bool = field( default=False, metadata={"help": "Enable sequence packing without cross-attention."}, ) tool_format: str | None = field( default=None, metadata={"help": "Tool format to use for constructing function calling examples."}, ) default_system: str | None = field( default=None, metadata={"help": "Override the default system message in the template."}, ) enable_thinking: bool | None = field( default=True, metadata={"help": "Whether or not to enable thinking mode for reasoning models."}, ) tokenized_path: str | None = field( default=None, metadata={ "help": ( "Path to save or load the tokenized datasets. " "If tokenized_path not exists, it will save the tokenized datasets. " "If tokenized_path exists, it will load the tokenized datasets." 
) }, ) data_shared_file_system: bool = field( default=False, metadata={"help": "Whether or not to use a shared file system for the datasets."}, ) def __post_init__(self): def split_arg(arg): if isinstance(arg, str): return [item.strip() for item in arg.split(",")] return arg self.dataset = split_arg(self.dataset) self.eval_dataset = split_arg(self.eval_dataset) if self.media_dir is None: self.media_dir = self.dataset_dir if self.dataset is None and self.val_size > 1e-6: raise ValueError("Cannot specify `val_size` if `dataset` is None.") if self.eval_dataset is not None and self.val_size > 1e-6: raise ValueError("Cannot specify `val_size` if `eval_dataset` is not None.") if self.interleave_probs is not None: if self.mix_strategy == "concat": raise ValueError("`interleave_probs` is only valid for interleaved mixing.") self.interleave_probs = list(map(float, split_arg(self.interleave_probs))) if self.dataset is not None and len(self.dataset) != len(self.interleave_probs): raise ValueError("The length of dataset and interleave probs should be identical.") if self.eval_dataset is not None and len(self.eval_dataset) != len(self.interleave_probs): raise ValueError("The length of eval dataset and interleave probs should be identical.") if self.streaming and self.val_size > 1e-6 and self.val_size < 1: raise ValueError("Streaming mode should have an integer val size.") if self.streaming and self.max_samples is not None: raise ValueError("`max_samples` is incompatible with `streaming`.") if self.mask_history and self.train_on_prompt: raise ValueError("`mask_history` is incompatible with `train_on_prompt`.") if self.neat_packing: self.packing = True if self.packing: self.cutoff_len -= 1 # avoid pad_to_multiple_of, needs improve def to_dict(self) -> dict[str, Any]: return asdict(self)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/parser.py
src/llamafactory/hparams/parser.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from pathlib import Path from typing import Any, Optional import torch import transformers from omegaconf import OmegaConf from transformers import HfArgumentParser from transformers.integrations import is_deepspeed_zero3_enabled from transformers.trainer_utils import get_last_checkpoint from transformers.training_args import ParallelMode from transformers.utils import is_torch_bf16_gpu_available, is_torch_npu_available from ..extras import logging from ..extras.constants import CHECKPOINT_NAMES, EngineName from ..extras.misc import check_dependencies, check_version, get_current_device, is_env_enabled from ..extras.packages import is_mcore_adapter_available, is_transformers_version_greater_than from .data_args import DataArguments from .evaluation_args import EvaluationArguments from .finetuning_args import FinetuningArguments from .generating_args import GeneratingArguments from .model_args import ModelArguments from .training_args import RayArguments, TrainingArguments logger = logging.get_logger(__name__) check_dependencies() _TRAIN_ARGS = [ModelArguments, DataArguments, TrainingArguments, FinetuningArguments, GeneratingArguments] _TRAIN_CLS = tuple[ModelArguments, DataArguments, 
TrainingArguments, FinetuningArguments, GeneratingArguments] _INFER_ARGS = [ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments] _INFER_CLS = tuple[ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments] _EVAL_ARGS = [ModelArguments, DataArguments, EvaluationArguments, FinetuningArguments] _EVAL_CLS = tuple[ModelArguments, DataArguments, EvaluationArguments, FinetuningArguments] if is_mcore_adapter_available() and is_env_enabled("USE_MCA"): from mcore_adapter import TrainingArguments as McaTrainingArguments _TRAIN_MCA_ARGS = [ModelArguments, DataArguments, McaTrainingArguments, FinetuningArguments, GeneratingArguments] _TRAIN_MCA_CLS = tuple[ ModelArguments, DataArguments, McaTrainingArguments, FinetuningArguments, GeneratingArguments ] else: _TRAIN_MCA_ARGS = [] _TRAIN_MCA_CLS = tuple() def read_args(args: dict[str, Any] | list[str] | None = None) -> dict[str, Any] | list[str]: r"""Get arguments from the command line or a config file.""" if args is not None: return args if sys.argv[1].endswith(".yaml") or sys.argv[1].endswith(".yml"): override_config = OmegaConf.from_cli(sys.argv[2:]) dict_config = OmegaConf.load(Path(sys.argv[1]).absolute()) return OmegaConf.to_container(OmegaConf.merge(dict_config, override_config)) elif sys.argv[1].endswith(".json"): override_config = OmegaConf.from_cli(sys.argv[2:]) dict_config = OmegaConf.load(Path(sys.argv[1]).absolute()) return OmegaConf.to_container(OmegaConf.merge(dict_config, override_config)) else: return sys.argv[1:] def _parse_args( parser: "HfArgumentParser", args: dict[str, Any] | list[str] | None = None, allow_extra_keys: bool = False ) -> tuple[Any]: args = read_args(args) if isinstance(args, dict): return parser.parse_dict(args, allow_extra_keys=allow_extra_keys) (*parsed_args, unknown_args) = parser.parse_args_into_dataclasses(args=args, return_remaining_strings=True) if unknown_args and not allow_extra_keys: print(parser.format_help()) print(f"Got unknown args, potentially 
deprecated arguments: {unknown_args}") raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {unknown_args}") return tuple(parsed_args) def _set_transformers_logging() -> None: if os.getenv("LLAMAFACTORY_VERBOSITY", "INFO") in ["DEBUG", "INFO"]: transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() def _set_env_vars() -> None: if is_torch_npu_available(): # avoid JIT compile on NPU devices, see https://zhuanlan.zhihu.com/p/660875458 torch.npu.set_compile_mode(jit_compile=is_env_enabled("NPU_JIT_COMPILE")) # avoid use fork method on NPU devices, see https://github.com/hiyouga/LLaMA-Factory/issues/7447 os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" def _verify_model_args( model_args: "ModelArguments", data_args: "DataArguments", finetuning_args: "FinetuningArguments", ) -> None: if model_args.adapter_name_or_path is not None and finetuning_args.finetuning_type != "lora": raise ValueError("Adapter is only valid for the LoRA method.") if model_args.quantization_bit is not None: if finetuning_args.finetuning_type not in ["lora", "oft"]: raise ValueError("Quantization is only compatible with the LoRA or OFT method.") if finetuning_args.pissa_init: raise ValueError("Please use scripts/pissa_init.py to initialize PiSSA for a quantized model.") if model_args.resize_vocab: raise ValueError("Cannot resize embedding layers of a quantized model.") if model_args.adapter_name_or_path is not None and finetuning_args.create_new_adapter: raise ValueError("Cannot create new adapter upon a quantized model.") if model_args.adapter_name_or_path is not None and len(model_args.adapter_name_or_path) != 1: raise ValueError("Quantized model only accepts a single adapter. Merge them first.") if data_args.template == "yi" and model_args.use_fast_tokenizer: logger.warning_rank0("We should use slow tokenizer for the Yi models. 
Change `use_fast_tokenizer` to False.") model_args.use_fast_tokenizer = False def _check_extra_dependencies( model_args: "ModelArguments", finetuning_args: "FinetuningArguments", training_args: Optional["TrainingArguments"] = None, ) -> None: if model_args.use_kt: check_version("ktransformers", mandatory=True) if model_args.use_unsloth: check_version("unsloth", mandatory=True) if model_args.enable_liger_kernel: check_version("liger-kernel", mandatory=True) if model_args.mixture_of_depths is not None: check_version("mixture-of-depth>=1.1.6", mandatory=True) if model_args.infer_backend == EngineName.VLLM: check_version("vllm>=0.4.3,<=0.11.0") check_version("vllm", mandatory=True) elif model_args.infer_backend == EngineName.SGLANG: check_version("sglang>=0.4.5") check_version("sglang", mandatory=True) if finetuning_args.use_galore: check_version("galore_torch", mandatory=True) if finetuning_args.use_apollo: check_version("apollo_torch", mandatory=True) if finetuning_args.use_badam: check_version("badam>=1.2.1", mandatory=True) if finetuning_args.use_adam_mini: check_version("adam-mini", mandatory=True) if finetuning_args.use_swanlab: check_version("swanlab", mandatory=True) if finetuning_args.plot_loss: check_version("matplotlib", mandatory=True) if training_args is not None: if training_args.deepspeed: # pin deepspeed version < 0.17 because of https://github.com/deepspeedai/DeepSpeed/issues/7347 check_version("deepspeed", mandatory=True) check_version("deepspeed>=0.10.0,<=0.16.9") if training_args.predict_with_generate: check_version("jieba", mandatory=True) check_version("nltk", mandatory=True) check_version("rouge_chinese", mandatory=True) def _parse_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS: parser = HfArgumentParser(_TRAIN_ARGS) allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS") return _parse_args(parser, args, allow_extra_keys=allow_extra_keys) def _parse_train_mca_args(args: dict[str, Any] | list[str] | None = None) -> 
_TRAIN_MCA_CLS: parser = HfArgumentParser(_TRAIN_MCA_ARGS) allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS") model_args, data_args, training_args, finetuning_args, generating_args = _parse_args( parser, args, allow_extra_keys=allow_extra_keys ) _configure_mca_training_args(training_args, data_args, finetuning_args) return model_args, data_args, training_args, finetuning_args, generating_args def _configure_mca_training_args(training_args, data_args, finetuning_args) -> None: """Patch training args to avoid args checking errors and sync MCA settings.""" training_args.predict_with_generate = False training_args.generation_max_length = data_args.cutoff_len training_args.generation_num_beams = 1 training_args.use_mca = True finetuning_args.use_mca = True def _parse_infer_args(args: dict[str, Any] | list[str] | None = None) -> _INFER_CLS: parser = HfArgumentParser(_INFER_ARGS) allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS") return _parse_args(parser, args, allow_extra_keys=allow_extra_keys) def _parse_eval_args(args: dict[str, Any] | list[str] | None = None) -> _EVAL_CLS: parser = HfArgumentParser(_EVAL_ARGS) allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS") return _parse_args(parser, args, allow_extra_keys=allow_extra_keys) def get_ray_args(args: dict[str, Any] | list[str] | None = None) -> RayArguments: parser = HfArgumentParser(RayArguments) (ray_args,) = _parse_args(parser, args, allow_extra_keys=True) return ray_args def get_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS: if is_env_enabled("USE_MCA"): model_args, data_args, training_args, finetuning_args, generating_args = _parse_train_mca_args(args) else: model_args, data_args, training_args, finetuning_args, generating_args = _parse_train_args(args) finetuning_args.use_mca = False # Setup logging if training_args.should_log: _set_transformers_logging() # Check arguments if finetuning_args.stage != "sft": if training_args.predict_with_generate: raise 
ValueError("`predict_with_generate` cannot be set as True except SFT.") if data_args.neat_packing: raise ValueError("`neat_packing` cannot be set as True except SFT.") if data_args.train_on_prompt or data_args.mask_history: raise ValueError("`train_on_prompt` or `mask_history` cannot be set as True except SFT.") if finetuning_args.stage == "sft" and training_args.do_predict and not training_args.predict_with_generate: raise ValueError("Please enable `predict_with_generate` to save model predictions.") if finetuning_args.stage in ["rm", "ppo"] and training_args.load_best_model_at_end: raise ValueError("RM and PPO stages do not support `load_best_model_at_end`.") if finetuning_args.stage == "ppo": if not training_args.do_train: raise ValueError("PPO training does not support evaluation, use the SFT stage to evaluate models.") if model_args.shift_attn: raise ValueError("PPO training is incompatible with S^2-Attn.") if finetuning_args.reward_model_type == "lora" and model_args.use_kt: raise ValueError("KTransformers does not support lora reward model.") if finetuning_args.reward_model_type == "lora" and model_args.use_unsloth: raise ValueError("Unsloth does not support lora reward model.") if training_args.report_to and training_args.report_to[0] not in ["wandb", "tensorboard"]: raise ValueError("PPO only accepts wandb or tensorboard logger.") if not model_args.use_kt and training_args.parallel_mode == ParallelMode.NOT_DISTRIBUTED: raise ValueError("Please launch distributed training with `llamafactory-cli` or `torchrun`.") if training_args.deepspeed and training_args.parallel_mode != ParallelMode.DISTRIBUTED: raise ValueError("Please use `FORCE_TORCHRUN=1` to launch DeepSpeed training.") if training_args.max_steps == -1 and data_args.streaming: raise ValueError("Please specify `max_steps` in streaming mode.") if training_args.do_train and data_args.dataset is None: raise ValueError("Please specify dataset for training.") if (training_args.do_eval or 
training_args.do_predict or training_args.predict_with_generate) and ( data_args.eval_dataset is None and data_args.val_size < 1e-6 ): raise ValueError("Please make sure eval_dataset be provided or val_size >1e-6") if training_args.predict_with_generate: if is_deepspeed_zero3_enabled(): raise ValueError("`predict_with_generate` is incompatible with DeepSpeed ZeRO-3.") if finetuning_args.compute_accuracy: raise ValueError("Cannot use `predict_with_generate` and `compute_accuracy` together.") if training_args.do_train and model_args.quantization_device_map == "auto": raise ValueError("Cannot use device map for quantized models in training.") if finetuning_args.pissa_init and is_deepspeed_zero3_enabled(): raise ValueError("Please use scripts/pissa_init.py to initialize PiSSA in DeepSpeed ZeRO-3.") if finetuning_args.pure_bf16: if not (is_torch_bf16_gpu_available() or (is_torch_npu_available() and torch.npu.is_bf16_supported())): raise ValueError("This device does not support `pure_bf16`.") if is_deepspeed_zero3_enabled(): raise ValueError("`pure_bf16` is incompatible with DeepSpeed ZeRO-3.") if training_args.parallel_mode == ParallelMode.DISTRIBUTED: if finetuning_args.use_galore and finetuning_args.galore_layerwise: raise ValueError("Distributed training does not support layer-wise GaLore.") if finetuning_args.use_apollo and finetuning_args.apollo_layerwise: raise ValueError("Distributed training does not support layer-wise APOLLO.") if finetuning_args.use_badam: if finetuning_args.badam_mode == "ratio": raise ValueError("Radio-based BAdam does not yet support distributed training, use layer-wise BAdam.") elif not is_deepspeed_zero3_enabled(): raise ValueError("Layer-wise BAdam only supports DeepSpeed ZeRO-3 training.") if training_args.deepspeed is not None and (finetuning_args.use_galore or finetuning_args.use_apollo): raise ValueError("GaLore and APOLLO are incompatible with DeepSpeed yet.") if training_args.fp8 and training_args.quantization_bit is not None: 
raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.") if model_args.infer_backend != EngineName.HF: raise ValueError("vLLM/SGLang backend is only available for API, CLI and Web.") if model_args.use_unsloth and is_deepspeed_zero3_enabled(): raise ValueError("Unsloth is incompatible with DeepSpeed ZeRO-3.") if model_args.use_kt and is_deepspeed_zero3_enabled(): raise ValueError("KTransformers is incompatible with DeepSpeed ZeRO-3.") if data_args.neat_packing and is_transformers_version_greater_than("4.53.0"): raise ValueError("Neat packing is incompatible with transformers>=4.53.0.") _set_env_vars() _verify_model_args(model_args, data_args, finetuning_args) _check_extra_dependencies(model_args, finetuning_args, training_args) if training_args.fp8_enable_fsdp_float8_all_gather and not training_args.fp8: logger.warning_rank0("fp8_enable_fsdp_float8_all_gather requires fp8=True. Setting fp8=True.") model_args.fp8 = True if ( training_args.do_train and finetuning_args.finetuning_type == "lora" and model_args.quantization_bit is None and model_args.resize_vocab and finetuning_args.additional_target is None ): logger.warning_rank0( "Remember to add embedding layers to `additional_target` to make the added tokens trainable." ) if training_args.do_train and model_args.quantization_bit is not None and (not model_args.upcast_layernorm): logger.warning_rank0("We recommend enable `upcast_layernorm` in quantized training.") if training_args.do_train and (not training_args.fp16) and (not training_args.bf16): logger.warning_rank0("We recommend enable mixed precision training.") if ( training_args.do_train and (finetuning_args.use_galore or finetuning_args.use_apollo) and not finetuning_args.pure_bf16 ): logger.warning_rank0( "Using GaLore or APOLLO with mixed precision training may significantly increases GPU memory usage." 
) if (not training_args.do_train) and model_args.quantization_bit is not None: logger.warning_rank0("Evaluating model in 4/8-bit mode may cause lower scores.") if (not training_args.do_train) and finetuning_args.stage == "dpo" and finetuning_args.ref_model is None: logger.warning_rank0("Specify `ref_model` for computing rewards at evaluation.") # Post-process training arguments training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams training_args.remove_unused_columns = False # important for multimodal dataset if finetuning_args.finetuning_type == "lora": # https://github.com/huggingface/transformers/blob/v4.50.0/src/transformers/trainer.py#L782 training_args.label_names = training_args.label_names or ["labels"] if "swanlab" in training_args.report_to and finetuning_args.use_swanlab: training_args.report_to.remove("swanlab") if ( training_args.parallel_mode == ParallelMode.DISTRIBUTED and training_args.ddp_find_unused_parameters is None and finetuning_args.finetuning_type == "lora" ): logger.info_rank0("Set `ddp_find_unused_parameters` to False in DDP training since LoRA is enabled.") training_args.ddp_find_unused_parameters = False if finetuning_args.stage in ["rm", "ppo"] and finetuning_args.finetuning_type in ["full", "freeze"]: can_resume_from_checkpoint = False if training_args.resume_from_checkpoint is not None: logger.warning_rank0("Cannot resume from checkpoint in current stage.") training_args.resume_from_checkpoint = None else: can_resume_from_checkpoint = True if ( training_args.resume_from_checkpoint is None and training_args.do_train and os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir and can_resume_from_checkpoint ): last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and any( os.path.isfile(os.path.join(training_args.output_dir, name)) 
for name in CHECKPOINT_NAMES ): raise ValueError("Output directory already exists and is not empty. Please set `overwrite_output_dir`.") if last_checkpoint is not None: training_args.resume_from_checkpoint = last_checkpoint logger.info_rank0(f"Resuming training from {training_args.resume_from_checkpoint}.") logger.info_rank0("Change `output_dir` or use `overwrite_output_dir` to avoid.") if ( finetuning_args.stage in ["rm", "ppo"] and finetuning_args.finetuning_type == "lora" and training_args.resume_from_checkpoint is not None ): logger.warning_rank0( f"Add {training_args.resume_from_checkpoint} to `adapter_name_or_path` to resume training from checkpoint." ) # Post-process model arguments if training_args.bf16 or finetuning_args.pure_bf16: model_args.compute_dtype = torch.bfloat16 elif training_args.fp16: model_args.compute_dtype = torch.float16 model_args.device_map = {"": get_current_device()} model_args.model_max_length = data_args.cutoff_len model_args.block_diag_attn = data_args.neat_packing data_args.packing = data_args.packing if data_args.packing is not None else finetuning_args.stage == "pt" # Log on each process the small summary logger.info( f"Process rank: {training_args.process_index}, " f"world size: {training_args.world_size}, device: {training_args.device}, " f"distributed training: {training_args.parallel_mode == ParallelMode.DISTRIBUTED}, " f"compute dtype: {str(model_args.compute_dtype)}" ) transformers.set_seed(training_args.seed) return model_args, data_args, training_args, finetuning_args, generating_args def get_infer_args(args: dict[str, Any] | list[str] | None = None) -> _INFER_CLS: model_args, data_args, finetuning_args, generating_args = _parse_infer_args(args) # Setup logging _set_transformers_logging() # Check arguments if model_args.infer_backend == "vllm": if finetuning_args.stage != "sft": raise ValueError("vLLM engine only supports auto-regressive models.") if model_args.quantization_bit is not None: raise ValueError("vLLM engine 
does not support bnb quantization (GPTQ and AWQ are supported).") if model_args.rope_scaling is not None: raise ValueError("vLLM engine does not support RoPE scaling.") if model_args.adapter_name_or_path is not None and len(model_args.adapter_name_or_path) != 1: raise ValueError("vLLM only accepts a single adapter. Merge them first.") _set_env_vars() _verify_model_args(model_args, data_args, finetuning_args) _check_extra_dependencies(model_args, finetuning_args) # Post-process model arguments if model_args.export_dir is not None and model_args.export_device == "cpu": model_args.device_map = {"": torch.device("cpu")} if data_args.cutoff_len != DataArguments().cutoff_len: # override cutoff_len if it is not default model_args.model_max_length = data_args.cutoff_len else: model_args.device_map = "auto" return model_args, data_args, finetuning_args, generating_args def get_eval_args(args: dict[str, Any] | list[str] | None = None) -> _EVAL_CLS: model_args, data_args, eval_args, finetuning_args = _parse_eval_args(args) # Setup logging _set_transformers_logging() # Check arguments if model_args.infer_backend != EngineName.HF: raise ValueError("vLLM/SGLang backend is only available for API, CLI and Web.") _set_env_vars() _verify_model_args(model_args, data_args, finetuning_args) _check_extra_dependencies(model_args, finetuning_args) model_args.device_map = "auto" transformers.set_seed(eval_args.seed) return model_args, data_args, eval_args, finetuning_args
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/training_args.py
src/llamafactory/hparams/training_args.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from dataclasses import dataclass, field from typing import Literal from transformers import Seq2SeqTrainingArguments from transformers.training_args import _convert_str_dict from ..extras.misc import is_env_enabled, use_ray from ..extras.packages import is_mcore_adapter_available if is_env_enabled("USE_MCA"): if not is_mcore_adapter_available(): raise ImportError( "mcore_adapter is required when USE_MCA=1. Please install `mcore_adapter` and its dependencies." ) from mcore_adapter import Seq2SeqTrainingArguments as McaSeq2SeqTrainingArguments BaseTrainingArguments = McaSeq2SeqTrainingArguments else: BaseTrainingArguments = Seq2SeqTrainingArguments @dataclass class RayArguments: r"""Arguments pertaining to the Ray training.""" ray_run_name: str | None = field( default=None, metadata={"help": "The training results will be saved at `<ray_storage_path>/ray_run_name`."}, ) ray_storage_path: str = field( default="./saves", metadata={"help": "The storage path to save training results to"}, ) ray_storage_filesystem: Literal["s3", "gs", "gcs"] | None = field( default=None, metadata={"help": "The storage filesystem to use. If None specified, local filesystem will be used."}, ) ray_num_workers: int = field( default=1, metadata={"help": "The number of workers for Ray training. 
Default is 1 worker."}, ) resources_per_worker: dict | str = field( default_factory=lambda: {"GPU": 1}, metadata={"help": "The resources per worker for Ray training. Default is to use 1 GPU per worker."}, ) placement_strategy: Literal["SPREAD", "PACK", "STRICT_SPREAD", "STRICT_PACK"] = field( default="PACK", metadata={"help": "The placement strategy for Ray training. Default is PACK."}, ) ray_init_kwargs: dict | str | None = field( default=None, metadata={"help": "The arguments to pass to ray.init for Ray training. Default is None."}, ) def __post_init__(self): self.use_ray = use_ray() if isinstance(self.resources_per_worker, str) and self.resources_per_worker.startswith("{"): self.resources_per_worker = _convert_str_dict(json.loads(self.resources_per_worker)) if isinstance(self.ray_init_kwargs, str) and self.ray_init_kwargs.startswith("{"): self.ray_init_kwargs = _convert_str_dict(json.loads(self.ray_init_kwargs)) if self.ray_storage_filesystem is not None: if self.ray_storage_filesystem not in ["s3", "gs", "gcs"]: raise ValueError( f"ray_storage_filesystem must be one of ['s3', 'gs', 'gcs'], got {self.ray_storage_filesystem}." ) import pyarrow.fs as fs if self.ray_storage_filesystem == "s3": self.ray_storage_filesystem = fs.S3FileSystem() elif self.ray_storage_filesystem == "gs" or self.ray_storage_filesystem == "gcs": self.ray_storage_filesystem = fs.GcsFileSystem() @dataclass class Fp8Arguments: r"""Arguments pertaining to the FP8 training.""" fp8: bool = field( default=False, metadata={ "help": "Enable FP8 mixed precision training via HuggingFace Accelerate. " "Requires PyTorch 2.7+ and Hopper architecture GPUs." }, ) fp8_backend: str = field( default="auto", metadata={ "help": "FP8 backend to use ('auto', 'torchao', 'te', 'msamp'). 'auto' selects best available backend." 
}, ) fp8_enable_fsdp_float8_all_gather: bool = field( default=False, metadata={"help": "Enable FP8 optimizations for FSDP2 all-gather operations."}, ) @dataclass class TrainingArguments(Fp8Arguments, RayArguments, BaseTrainingArguments): r"""Arguments pertaining to the trainer.""" overwrite_output_dir: bool = field( default=False, metadata={"help": "deprecated"}, ) def __post_init__(self): RayArguments.__post_init__(self) BaseTrainingArguments.__post_init__(self)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/model_args.py
src/llamafactory/hparams/model_args.py
# Copyright 2025 HuggingFace Inc., the KVCache.AI team, Approaching AI, and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from dataclasses import asdict, dataclass, field, fields from typing import Any, Literal, Self import torch from omegaconf import OmegaConf from transformers.training_args import _convert_str_dict from ..extras.constants import AttentionFunction, EngineName, QuantizationMethod, RopeScaling from ..extras.logging import get_logger logger = get_logger(__name__) @dataclass class BaseModelArguments: r"""Arguments pertaining to the model.""" model_name_or_path: str | None = field( default=None, metadata={ "help": "Path to the model weight or identifier from huggingface.co/models or modelscope.cn/models." }, ) adapter_name_or_path: str | None = field( default=None, metadata={ "help": ( "Path to the adapter weight or identifier from huggingface.co/models. " "Use commas to separate multiple adapters." 
) }, ) adapter_folder: str | None = field( default=None, metadata={"help": "The folder containing the adapter weights to load."}, ) cache_dir: str | None = field( default=None, metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn."}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)."}, ) resize_vocab: bool = field( default=False, metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."}, ) split_special_tokens: bool = field( default=False, metadata={"help": "Whether or not the special tokens should be split during the tokenization process."}, ) add_tokens: str | None = field( default=None, metadata={ "help": "Non-special tokens to be added into the tokenizer. Use commas to separate multiple tokens." }, ) add_special_tokens: str | None = field( default=None, metadata={"help": "Special tokens to be added into the tokenizer. Use commas to separate multiple tokens."}, ) new_special_tokens_config: str | None = field( default=None, metadata={ "help": ( "Path to YAML config with special token descriptions for semantic initialization. " "If set, this takes precedence over add_special_tokens. " "YAML format: {'<token>': 'description text', ...}" ) }, ) init_special_tokens: Literal["noise_init", "desc_init", "desc_init_w_noise"] = field( default="noise_init", metadata={ "help": ( "Initialization method for new special tokens: " "'noise_init' (default, random noise around mean), " "'desc_init' (semantic initialization from descriptions), " "'desc_init_w_noise' (semantic + random noise). " "Note: 'desc_init' methods require new_special_tokens_config." 
) }, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) low_cpu_mem_usage: bool = field( default=True, metadata={"help": "Whether or not to use memory-efficient model loading."}, ) rope_scaling: RopeScaling | None = field( default=None, metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."}, ) flash_attn: AttentionFunction = field( default=AttentionFunction.AUTO, metadata={"help": "Enable FlashAttention for faster training and inference."}, ) shift_attn: bool = field( default=False, metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}, ) mixture_of_depths: Literal["convert", "load"] | None = field( default=None, metadata={"help": "Convert the model to mixture-of-depths (MoD) or load the MoD model."}, ) use_unsloth: bool = field( default=False, metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."}, ) use_unsloth_gc: bool = field( default=False, metadata={"help": "Whether or not to use unsloth's gradient checkpointing (no need to install unsloth)."}, ) enable_liger_kernel: bool = field( default=False, metadata={"help": "Whether or not to enable liger kernel for faster training."}, ) moe_aux_loss_coef: float | None = field( default=None, metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."}, ) disable_gradient_checkpointing: bool = field( default=False, metadata={"help": "Whether or not to disable gradient checkpointing."}, ) use_reentrant_gc: bool = field( default=True, metadata={"help": "Whether or not to use reentrant gradient checkpointing."}, ) upcast_layernorm: bool = field( default=False, metadata={"help": "Whether or not to upcast the layernorm weights in fp32."}, ) upcast_lmhead_output: bool = field( default=False, metadata={"help": "Whether or not to upcast the output of lm_head in fp32."}, ) train_from_scratch: bool = field( 
default=False, metadata={"help": "Whether or not to randomly initialize the model weights."}, ) infer_backend: EngineName = field( default=EngineName.HF, metadata={"help": "Backend engine used at inference."}, ) offload_folder: str = field( default="offload", metadata={"help": "Path to offload model weights."}, ) use_kv_cache: bool = field( default=True, metadata={"help": "Whether or not to use KV cache in generation."}, ) use_v1_kernels: bool | None = field( default=False, metadata={"help": "Whether or not to use high-performance kernels in training."}, ) infer_dtype: Literal["auto", "float16", "bfloat16", "float32"] = field( default="auto", metadata={"help": "Data type for model weights and activations at inference."}, ) hf_hub_token: str | None = field( default=None, metadata={"help": "Auth token to log in with Hugging Face Hub."}, ) ms_hub_token: str | None = field( default=None, metadata={"help": "Auth token to log in with ModelScope Hub."}, ) om_hub_token: str | None = field( default=None, metadata={"help": "Auth token to log in with Modelers Hub."}, ) print_param_status: bool = field( default=False, metadata={"help": "For debugging purposes, print the status of the parameters in the model."}, ) trust_remote_code: bool = field( default=False, metadata={"help": "Whether to trust the execution of code from datasets/models defined on the Hub or not."}, ) def __post_init__(self): if self.model_name_or_path is None: raise ValueError("Please provide `model_name_or_path`.") if self.split_special_tokens and self.use_fast_tokenizer: raise ValueError("`split_special_tokens` is only supported for slow tokenizers.") if self.adapter_name_or_path is not None: # support merging multiple lora weights self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")] if self.add_tokens is not None: # support multiple tokens self.add_tokens = [token.strip() for token in self.add_tokens.split(",")] # Process special tokens with priority: 
new_special_tokens_config > add_special_tokens if self.new_special_tokens_config is not None: # Priority 1: Load from YAML config (extracts both tokens and descriptions) try: cfg = OmegaConf.load(self.new_special_tokens_config) token_descriptions = OmegaConf.to_container(cfg) if not isinstance(token_descriptions, dict): raise ValueError( f"YAML config must be a dictionary mapping tokens to descriptions. " f"Got: {type(token_descriptions)}" ) # Extract token list from config keys extracted_tokens = list(token_descriptions.keys()) # Warn if both are set if self.add_special_tokens is not None: logger.warning_rank0( "Both 'new_special_tokens_config' and 'add_special_tokens' are set. " f"Using tokens from config: {extracted_tokens}" ) # Override add_special_tokens with extracted tokens (as list) self.add_special_tokens = extracted_tokens # Store descriptions internally for later use (internal attribute) self._special_token_descriptions = token_descriptions logger.info_rank0( f"Loaded {len(extracted_tokens)} special tokens with descriptions from: " f"{self.new_special_tokens_config}" ) except Exception as e: logger.error_rank0( f"Failed to load special tokens config from '{self.new_special_tokens_config}': {e}" ) raise elif self.add_special_tokens is not None: # Priority 2: Use simple comma-separated string (no descriptions) self.add_special_tokens = [token.strip() for token in self.add_special_tokens.split(",")] self._special_token_descriptions = None else: # No special tokens to add self._special_token_descriptions = None # Validate init method if self.init_special_tokens in ["desc_init", "desc_init_w_noise"]: if self._special_token_descriptions is None: logger.warning_rank0( f"init_special_tokens='{self.init_special_tokens}' requires new_special_tokens_config. 
" "Falling back to 'noise_init'" ) self.init_special_tokens = "noise_init" @dataclass class QuantizationArguments: r"""Arguments pertaining to the quantization method.""" quantization_method: QuantizationMethod = field( default=QuantizationMethod.BNB, metadata={"help": "Quantization method to use for on-the-fly quantization."}, ) quantization_bit: int | None = field( default=None, metadata={"help": "The number of bits to quantize the model using on-the-fly quantization."}, ) quantization_type: Literal["fp4", "nf4"] = field( default="nf4", metadata={"help": "Quantization data type to use in bitsandbytes int4 training."}, ) double_quantization: bool = field( default=True, metadata={"help": "Whether or not to use double quantization in bitsandbytes int4 training."}, ) quantization_device_map: Literal["auto"] | None = field( default=None, metadata={"help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0."}, ) @dataclass class ProcessorArguments: r"""Arguments pertaining to the image processor.""" image_max_pixels: int = field( default=768 * 768, metadata={"help": "The maximum number of pixels of image inputs."}, ) image_min_pixels: int = field( default=32 * 32, metadata={"help": "The minimum number of pixels of image inputs."}, ) image_do_pan_and_scan: bool = field( default=False, metadata={"help": "Use pan and scan to process image for gemma3."}, ) crop_to_patches: bool = field( default=False, metadata={"help": "Whether to crop the image to patches for internvl."}, ) video_max_pixels: int = field( default=256 * 256, metadata={"help": "The maximum number of pixels of video inputs."}, ) video_min_pixels: int = field( default=16 * 16, metadata={"help": "The minimum number of pixels of video inputs."}, ) video_fps: float = field( default=2.0, metadata={"help": "The frames to sample per second for video inputs."}, ) video_maxlen: int = field( default=128, metadata={"help": "The maximum number of sampled frames for video inputs."}, ) 
use_audio_in_video: bool = field( default=False, metadata={"help": "Whether or not to use audio in video inputs."}, ) audio_sampling_rate: int = field( default=16000, metadata={"help": "The sampling rate of audio inputs."}, ) def __post_init__(self): if self.image_max_pixels < self.image_min_pixels: raise ValueError("`image_max_pixels` cannot be smaller than `image_min_pixels`.") if self.video_max_pixels < self.video_min_pixels: raise ValueError("`video_max_pixels` cannot be smaller than `video_min_pixels`.") @dataclass class ExportArguments: r"""Arguments pertaining to the model export.""" export_dir: str | None = field( default=None, metadata={"help": "Path to the directory to save the exported model."}, ) export_size: int = field( default=5, metadata={"help": "The file shard size (in GB) of the exported model."}, ) export_device: Literal["cpu", "auto"] = field( default="cpu", metadata={"help": "The device used in model export, use `auto` to accelerate exporting."}, ) export_quantization_bit: int | None = field( default=None, metadata={"help": "The number of bits to quantize the exported model."}, ) export_quantization_dataset: str | None = field( default=None, metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."}, ) export_quantization_nsamples: int = field( default=128, metadata={"help": "The number of samples used for quantization."}, ) export_quantization_maxlen: int = field( default=1024, metadata={"help": "The maximum length of the model inputs used for quantization."}, ) export_legacy_format: bool = field( default=False, metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."}, ) export_hub_model_id: str | None = field( default=None, metadata={"help": "The name of the repository if push the model to the Hugging Face hub."}, ) def __post_init__(self): if self.export_quantization_bit is not None and self.export_quantization_dataset is None: raise ValueError("Quantization dataset is 
necessary for exporting.") @dataclass class VllmArguments: r"""Arguments pertaining to the vLLM worker.""" vllm_maxlen: int = field( default=4096, metadata={"help": "Maximum sequence (prompt + response) length of the vLLM engine."}, ) vllm_gpu_util: float = field( default=0.7, metadata={"help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine."}, ) vllm_enforce_eager: bool = field( default=False, metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."}, ) vllm_max_lora_rank: int = field( default=32, metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."}, ) vllm_config: dict | str | None = field( default=None, metadata={"help": "Config to initialize the vllm engine. Please use JSON strings."}, ) def __post_init__(self): if isinstance(self.vllm_config, str) and self.vllm_config.startswith("{"): self.vllm_config = _convert_str_dict(json.loads(self.vllm_config)) @dataclass class SGLangArguments: r"""Arguments pertaining to the SGLang worker.""" sglang_maxlen: int = field( default=4096, metadata={"help": "Maximum sequence (prompt + response) length of the SGLang engine."}, ) sglang_mem_fraction: float = field( default=0.7, metadata={"help": "The memory fraction (0-1) to be used for the SGLang engine."}, ) sglang_tp_size: int = field( default=-1, metadata={"help": "Tensor parallel size for the SGLang engine."}, ) sglang_config: dict | str | None = field( default=None, metadata={"help": "Config to initialize the SGLang engine. Please use JSON strings."}, ) sglang_lora_backend: Literal["triton", "flashinfer"] = field( default="triton", metadata={ "help": "The backend of running GEMM kernels for Lora modules. Recommend using the Triton LoRA backend for better performance and stability." 
}, ) def __post_init__(self): if isinstance(self.sglang_config, str) and self.sglang_config.startswith("{"): self.sglang_config = _convert_str_dict(json.loads(self.sglang_config)) @dataclass class KTransformersArguments: r"""Arguments pertaining to the KT training.""" use_kt: bool = field( default=False, metadata={"help": "Whether To Use KTransformers Optimizations For LoRA Training."}, ) kt_optimize_rule: str | None = field( default=None, metadata={ "help": "Path To The KTransformers Optimize Rule; See https://github.com/kvcache-ai/ktransformers/." }, ) cpu_infer: int | None = field( default=32, metadata={"help": "Number Of CPU Cores Used For Computation."}, ) chunk_size: int | None = field( default=8192, metadata={"help": "Chunk Size Used For CPU Compute In KTransformers."}, ) mode: str | None = field( default="normal", metadata={"help": "Normal Or Long_Context For Llama Models."}, ) kt_maxlen: int = field( default=4096, metadata={"help": "Maximum Sequence (Prompt + Response) Length Of The KT Engine."}, ) kt_use_cuda_graph: bool = field( default=True, metadata={"help": "Whether To Use CUDA Graphs For The KT Engine."}, ) kt_mode: str = field( default="normal", metadata={"help": "Normal Or Long_Context Mode For The KT Engine."}, ) kt_force_think: bool = field( default=False, metadata={"help": "Force-Think Toggle For The KT Engine."}, ) @dataclass class ModelArguments( SGLangArguments, VllmArguments, KTransformersArguments, ExportArguments, ProcessorArguments, QuantizationArguments, BaseModelArguments, ): r"""Arguments pertaining to which model/config/tokenizer we are going to fine-tune or infer. The class on the most right will be displayed first. """ compute_dtype: torch.dtype | None = field( default=None, init=False, metadata={"help": "Torch data type for computing model outputs, derived from `fp/bf16`. 
Do not specify it."}, ) device_map: str | dict[str, Any] | None = field( default=None, init=False, metadata={"help": "Device map for model placement, derived from training stage. Do not specify it."}, ) model_max_length: int | None = field( default=None, init=False, metadata={"help": "The maximum input length for model, derived from `cutoff_len`. Do not specify it."}, ) block_diag_attn: bool = field( default=False, init=False, metadata={"help": "Whether use block diag attention or not, derived from `neat_packing`. Do not specify it."}, ) def __post_init__(self): BaseModelArguments.__post_init__(self) ProcessorArguments.__post_init__(self) ExportArguments.__post_init__(self) VllmArguments.__post_init__(self) SGLangArguments.__post_init__(self) @classmethod def copyfrom(cls, source: "Self", **kwargs) -> "Self": init_args, lazy_args = {}, {} for attr in fields(source): if attr.init: init_args[attr.name] = getattr(source, attr.name) else: lazy_args[attr.name] = getattr(source, attr.name) init_args.update(kwargs) result = cls(**init_args) for name, value in lazy_args.items(): setattr(result, name, value) return result def to_dict(self) -> dict[str, Any]: args = asdict(self) args = {k: f"<{k.upper()}>" if k.endswith("token") else v for k, v in args.items()} return args
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/finetuning_args.py
src/llamafactory/hparams/finetuning_args.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import asdict, dataclass, field from typing import Any, Literal @dataclass class FreezeArguments: r"""Arguments pertaining to the freeze (partial-parameter) training.""" freeze_trainable_layers: int = field( default=2, metadata={ "help": ( "The number of trainable layers for freeze (partial-parameter) fine-tuning. " "Positive numbers mean the last n layers are set as trainable, " "negative numbers mean the first n layers are set as trainable." ) }, ) freeze_trainable_modules: str = field( default="all", metadata={ "help": ( "Name(s) of trainable modules for freeze (partial-parameter) fine-tuning. " "Use commas to separate multiple modules. " "Use `all` to specify all the available modules." ) }, ) freeze_extra_modules: str | None = field( default=None, metadata={ "help": ( "Name(s) of modules apart from hidden layers to be set as trainable " "for freeze (partial-parameter) fine-tuning. " "Use commas to separate multiple modules." ) }, ) @dataclass class LoraArguments: r"""Arguments pertaining to the LoRA training.""" additional_target: str | None = field( default=None, metadata={ "help": ( "Name(s) of modules apart from LoRA layers to be set as trainable " "and saved in the final checkpoint. " "Use commas to separate multiple modules." 
) }, ) lora_alpha: int | None = field( default=None, metadata={"help": "The scale factor for LoRA fine-tuning (default: lora_rank * 2)."}, ) lora_dropout: float = field( default=0.0, metadata={"help": "Dropout rate for the LoRA fine-tuning."}, ) lora_rank: int = field( default=8, metadata={"help": "The intrinsic dimension for LoRA fine-tuning."}, ) lora_target: str = field( default="all", metadata={ "help": ( "Name(s) of target modules to apply LoRA. " "Use commas to separate multiple modules. " "Use `all` to specify all the linear modules." ) }, ) loraplus_lr_ratio: float | None = field( default=None, metadata={"help": "LoRA plus learning rate ratio (lr_B / lr_A)."}, ) loraplus_lr_embedding: float = field( default=1e-6, metadata={"help": "LoRA plus learning rate for lora embedding layers."}, ) use_rslora: bool = field( default=False, metadata={"help": "Whether or not to use the rank stabilization scaling factor for LoRA layer."}, ) use_dora: bool = field( default=False, metadata={"help": "Whether or not to use the weight-decomposed lora method (DoRA)."}, ) pissa_init: bool = field( default=False, metadata={"help": "Whether or not to initialize a PiSSA adapter."}, ) pissa_iter: int = field( default=16, metadata={"help": "The number of iteration steps performed by FSVD in PiSSA. Use -1 to disable it."}, ) pissa_convert: bool = field( default=False, metadata={"help": "Whether or not to convert the PiSSA adapter to a normal LoRA adapter."}, ) create_new_adapter: bool = field( default=False, metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."}, ) @dataclass class OFTArguments: r"""Arguments pertaining to the OFT training.""" additional_target: str | None = field( default=None, metadata={ "help": ( "Name(s) of modules apart from LoRA layers to be set as trainable " "and saved in the final checkpoint. " "Use commas to separate multiple modules." 
) }, ) module_dropout: float = field( default=0.0, metadata={"help": "Dropout rate for the OFT fine-tuning."}, ) oft_rank: int = field( default=0, metadata={"help": "The intrinsic dimension for OFT fine-tuning."}, ) oft_block_size: int = field( default=32, metadata={"help": "The intrinsic dimension for OFT fine-tuning."}, ) oft_target: str = field( default="all", metadata={ "help": ( "Name(s) of target modules to apply OFT. " "Use commas to separate multiple modules. " "Use `all` to specify all the linear modules." ) }, ) create_new_adapter: bool = field( default=False, metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."}, ) @dataclass class RLHFArguments: r"""Arguments pertaining to the PPO, DPO and KTO training.""" pref_beta: float = field( default=0.1, metadata={"help": "The beta parameter in the preference loss."}, ) pref_ftx: float = field( default=0.0, metadata={"help": "The supervised fine-tuning loss coefficient in DPO training."}, ) pref_bco_weight: float = field( default=0.0, metadata={"help": "The Binary Classifier Optimization coefficient in DPO training."}, ) pref_loss: Literal["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"] = field( default="sigmoid", metadata={"help": "The type of DPO loss to use."}, ) dpo_label_smoothing: float = field( default=0.0, metadata={"help": "The robust DPO label smoothing parameter in cDPO that should be between 0 and 0.5."}, ) kto_chosen_weight: float = field( default=1.0, metadata={"help": "The weight factor of the desirable losses in KTO training."}, ) kto_rejected_weight: float = field( default=1.0, metadata={"help": "The weight factor of the undesirable losses in KTO training."}, ) simpo_gamma: float = field( default=0.5, metadata={"help": "The target reward margin term in SimPO loss."}, ) ppo_buffer_size: int = field( default=1, metadata={"help": "The number of mini-batches to make experience buffer in a PPO optimization step."}, ) ppo_epochs: int = field( default=4, 
metadata={"help": "The number of epochs to perform in a PPO optimization step."}, ) ppo_score_norm: bool = field( default=False, metadata={"help": "Use score normalization in PPO training."}, ) ppo_target: float = field( default=6.0, metadata={"help": "Target KL value for adaptive KL control in PPO training."}, ) ppo_whiten_rewards: bool = field( default=False, metadata={"help": "Whiten the rewards before compute advantages in PPO training."}, ) ref_model: str | None = field( default=None, metadata={"help": "Path to the reference model used for the PPO or DPO training."}, ) ref_model_adapters: str | None = field( default=None, metadata={"help": "Path to the adapters of the reference model."}, ) ref_model_quantization_bit: int | None = field( default=None, metadata={"help": "The number of bits to quantize the reference model."}, ) reward_model: str | None = field( default=None, metadata={"help": "Path to the reward model used for the PPO training."}, ) reward_model_adapters: str | None = field( default=None, metadata={"help": "Path to the adapters of the reward model."}, ) reward_model_quantization_bit: int | None = field( default=None, metadata={"help": "The number of bits to quantize the reward model."}, ) reward_model_type: Literal["lora", "full", "api"] = field( default="lora", metadata={"help": "The type of the reward model in PPO training. Lora model only supports lora training."}, ) ld_alpha: float | None = field( default=None, metadata={ "help": ( "Alpha parameter from the LD-DPO paper, which controls the weighting of" " the verbose token log-probabilities in responses." ) }, ) @dataclass class GaloreArguments: r"""Arguments pertaining to the GaLore algorithm.""" use_galore: bool = field( default=False, metadata={"help": "Whether or not to use the gradient low-Rank projection (GaLore)."}, ) galore_target: str = field( default="all", metadata={ "help": ( "Name(s) of modules to apply GaLore. Use commas to separate multiple modules. 
" "Use `all` to specify all the linear modules." ) }, ) galore_rank: int = field( default=16, metadata={"help": "The rank of GaLore gradients."}, ) galore_update_interval: int = field( default=200, metadata={"help": "Number of steps to update the GaLore projection."}, ) galore_scale: float = field( default=2.0, metadata={"help": "GaLore scaling coefficient."}, ) galore_proj_type: Literal["std", "reverse_std", "right", "left", "full"] = field( default="std", metadata={"help": "Type of GaLore projection."}, ) galore_layerwise: bool = field( default=False, metadata={"help": "Whether or not to enable layer-wise update to further save memory."}, ) @dataclass class ApolloArguments: r"""Arguments pertaining to the APOLLO algorithm.""" use_apollo: bool = field( default=False, metadata={"help": "Whether or not to use the APOLLO optimizer."}, ) apollo_target: str = field( default="all", metadata={ "help": ( "Name(s) of modules to apply APOLLO. Use commas to separate multiple modules. " "Use `all` to specify all the linear modules." 
) }, ) apollo_rank: int = field( default=16, metadata={"help": "The rank of APOLLO gradients."}, ) apollo_update_interval: int = field( default=200, metadata={"help": "Number of steps to update the APOLLO projection."}, ) apollo_scale: float = field( default=32.0, metadata={"help": "APOLLO scaling coefficient."}, ) apollo_proj: Literal["svd", "random"] = field( default="random", metadata={"help": "Type of APOLLO low-rank projection algorithm (svd or random)."}, ) apollo_proj_type: Literal["std", "right", "left"] = field( default="std", metadata={"help": "Type of APOLLO projection."}, ) apollo_scale_type: Literal["channel", "tensor"] = field( default="channel", metadata={"help": "Type of APOLLO scaling (channel or tensor)."}, ) apollo_layerwise: bool = field( default=False, metadata={"help": "Whether or not to enable layer-wise update to further save memory."}, ) apollo_scale_front: bool = field( default=False, metadata={"help": "Whether or not to use the norm-growth limiter in front of gradient scaling."}, ) @dataclass class BAdamArgument: r"""Arguments pertaining to the BAdam optimizer.""" use_badam: bool = field( default=False, metadata={"help": "Whether or not to use the BAdam optimizer."}, ) badam_mode: Literal["layer", "ratio"] = field( default="layer", metadata={"help": "Whether to use layer-wise or ratio-wise BAdam optimizer."}, ) badam_start_block: int | None = field( default=None, metadata={"help": "The starting block index for layer-wise BAdam."}, ) badam_switch_mode: Literal["ascending", "descending", "random", "fixed"] | None = field( default="ascending", metadata={"help": "the strategy of picking block to update for layer-wise BAdam."}, ) badam_switch_interval: int | None = field( default=50, metadata={ "help": "Number of steps to update the block for layer-wise BAdam. Use -1 to disable the block update." 
}, ) badam_update_ratio: float = field( default=0.05, metadata={"help": "The ratio of the update for ratio-wise BAdam."}, ) badam_mask_mode: Literal["adjacent", "scatter"] = field( default="adjacent", metadata={ "help": ( "The mode of the mask for BAdam optimizer. " "`adjacent` means that the trainable parameters are adjacent to each other, " "`scatter` means that trainable parameters are randomly choosed from the weight." ) }, ) badam_verbose: int = field( default=0, metadata={ "help": ( "The verbosity level of BAdam optimizer. " "0 for no print, 1 for print the block prefix, 2 for print trainable parameters." ) }, ) @dataclass class SwanLabArguments: use_swanlab: bool = field( default=False, metadata={"help": "Whether or not to use the SwanLab (an experiment tracking and visualization tool)."}, ) swanlab_project: str | None = field( default="llamafactory", metadata={"help": "The project name in SwanLab."}, ) swanlab_workspace: str | None = field( default=None, metadata={"help": "The workspace name in SwanLab."}, ) swanlab_run_name: str | None = field( default=None, metadata={"help": "The experiment name in SwanLab."}, ) swanlab_mode: Literal["cloud", "local"] = field( default="cloud", metadata={"help": "The mode of SwanLab."}, ) swanlab_api_key: str | None = field( default=None, metadata={"help": "The API key for SwanLab."}, ) swanlab_logdir: str | None = field( default=None, metadata={"help": "The log directory for SwanLab."}, ) swanlab_lark_webhook_url: str | None = field( default=None, metadata={"help": "The Lark(飞书) webhook URL for SwanLab."}, ) swanlab_lark_secret: str | None = field( default=None, metadata={"help": "The Lark(飞书) secret for SwanLab."}, ) @dataclass class FinetuningArguments( SwanLabArguments, BAdamArgument, ApolloArguments, GaloreArguments, RLHFArguments, LoraArguments, OFTArguments, FreezeArguments, ): r"""Arguments pertaining to which techniques we are going to fine-tuning with.""" pure_bf16: bool = field( default=False, metadata={"help": 
"Whether or not to train model in purely bf16 precision (without AMP)."}, ) stage: Literal["pt", "sft", "rm", "ppo", "dpo", "kto"] = field( default="sft", metadata={"help": "Which stage will be performed in training."}, ) finetuning_type: Literal["lora", "oft", "freeze", "full"] = field( default="lora", metadata={"help": "Which fine-tuning method to use."}, ) use_llama_pro: bool = field( default=False, metadata={"help": "Whether or not to make only the parameters in the expanded blocks trainable."}, ) use_adam_mini: bool = field( default=False, metadata={"help": "Whether or not to use the Adam-mini optimizer."}, ) use_mca: bool = field( default=False, metadata={ "help": ( "Whether or not to use MCA (Megatron Core Adapter) training. " "Controlled by USE_MCA environment variable." ) }, ) use_muon: bool = field( default=False, metadata={"help": "Whether or not to use the Muon optimizer."}, ) use_dft_loss: bool = field( default=False, metadata={"help": "Whether to use the DFT loss."}, ) freeze_vision_tower: bool = field( default=True, metadata={"help": "Whether ot not to freeze the vision tower in MLLM training."}, ) freeze_multi_modal_projector: bool = field( default=True, metadata={"help": "Whether or not to freeze the multi modal projector in MLLM training."}, ) freeze_language_model: bool = field( default=False, metadata={"help": "Whether or not to freeze the language model in MLLM training."}, ) compute_accuracy: bool = field( default=False, metadata={"help": "Whether or not to compute the token-level accuracy at evaluation."}, ) disable_shuffling: bool = field( default=False, metadata={"help": "Whether or not to disable the shuffling of the training set."}, ) early_stopping_steps: int | None = field( default=None, metadata={"help": "Number of steps to stop training if the `metric_for_best_model` does not improve."}, ) plot_loss: bool = field( default=False, metadata={"help": "Whether or not to save the training loss curves."}, ) 
include_effective_tokens_per_second: bool = field( default=False, metadata={"help": "Whether or not to compute effective tokens per second."}, ) def __post_init__(self): def split_arg(arg): if isinstance(arg, str): return [item.strip() for item in arg.split(",")] return arg self.freeze_trainable_modules: list[str] = split_arg(self.freeze_trainable_modules) self.freeze_extra_modules: list[str] | None = split_arg(self.freeze_extra_modules) self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2 self.lora_target: list[str] = split_arg(self.lora_target) self.oft_target: list[str] = split_arg(self.oft_target) self.additional_target: list[str] | None = split_arg(self.additional_target) self.galore_target: list[str] = split_arg(self.galore_target) self.apollo_target: list[str] = split_arg(self.apollo_target) self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"] assert self.finetuning_type in ["lora", "oft", "freeze", "full"], "Invalid fine-tuning method." assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization." assert self.reward_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization." 
if self.stage == "ppo" and self.reward_model is None: raise ValueError("`reward_model` is necessary for PPO training.") if self.stage == "ppo" and self.reward_model_type == "lora" and self.finetuning_type != "lora": raise ValueError("`reward_model_type` cannot be lora for Freeze/Full PPO training.") if self.stage == "ppo" and self.reward_model_type == "oft" and self.finetuning_type != "oft": raise ValueError("`reward_model_type` cannot be oft for Freeze/Full PPO training.") if self.stage == "dpo" and self.pref_loss != "sigmoid" and self.dpo_label_smoothing > 1e-6: raise ValueError("`dpo_label_smoothing` is only valid for sigmoid loss function.") if self.use_llama_pro and self.finetuning_type == "full": raise ValueError("`use_llama_pro` is only valid for Freeze or LoRA training.") if self.finetuning_type == "lora" and (self.use_galore or self.use_apollo or self.use_badam): raise ValueError("Cannot use LoRA with GaLore, APOLLO or BAdam together.") if int(self.use_galore) + int(self.use_apollo) + (self.use_badam) > 1: raise ValueError("Cannot use GaLore, APOLLO or BAdam together.") if self.pissa_init and (self.stage in ["ppo", "kto"] or self.use_ref_model): raise ValueError("Cannot use PiSSA for current training stage.") if self.finetuning_type != "lora": if self.loraplus_lr_ratio is not None: raise ValueError("`loraplus_lr_ratio` is only valid for LoRA training.") if self.use_rslora: raise ValueError("`use_rslora` is only valid for LoRA training.") if self.use_dora: raise ValueError("`use_dora` is only valid for LoRA training.") if self.pissa_init: raise ValueError("`pissa_init` is only valid for LoRA training.") def to_dict(self) -> dict[str, Any]: args = asdict(self) args = {k: f"<{k.upper()}>" if k.endswith("api_key") else v for k, v in args.items()} return args
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/__init__.py
src/llamafactory/hparams/__init__.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .data_args import DataArguments from .evaluation_args import EvaluationArguments from .finetuning_args import FinetuningArguments from .generating_args import GeneratingArguments from .model_args import ModelArguments from .parser import get_eval_args, get_infer_args, get_ray_args, get_train_args, read_args from .training_args import RayArguments, TrainingArguments __all__ = [ "DataArguments", "EvaluationArguments", "FinetuningArguments", "GeneratingArguments", "ModelArguments", "RayArguments", "TrainingArguments", "get_eval_args", "get_infer_args", "get_ray_args", "get_train_args", "read_args", ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/hparams/evaluation_args.py
src/llamafactory/hparams/evaluation_args.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Literal from datasets import DownloadMode @dataclass class EvaluationArguments: r"""Arguments pertaining to specify the evaluation parameters.""" task: str = field( metadata={"help": "Name of the evaluation task."}, ) task_dir: str = field( default="evaluation", metadata={"help": "Path to the folder containing the evaluation datasets."}, ) batch_size: int = field( default=4, metadata={"help": "The batch size per GPU for evaluation."}, ) seed: int = field( default=42, metadata={"help": "Random seed to be used with data loaders."}, ) lang: Literal["en", "zh"] = field( default="en", metadata={"help": "Language used at evaluation."}, ) n_shot: int = field( default=5, metadata={"help": "Number of examplars for few-shot learning."}, ) save_dir: str | None = field( default=None, metadata={"help": "Path to save the evaluation results."}, ) download_mode: DownloadMode = field( default=DownloadMode.REUSE_DATASET_IF_EXISTS, metadata={"help": "Download mode used for the evaluation datasets."}, ) def __post_init__(self): if self.save_dir is not None and os.path.exists(self.save_dir): raise ValueError("`save_dir` already exists, use another one.")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/eval/template.py
src/llamafactory/eval/template.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from ..data import Role from ..extras.constants import CHOICES @dataclass class EvalTemplate: system: str choice: str answer: str def _parse_example(self, example: dict[str, str]) -> tuple[str, str]: r"""Parse eval example. input: a dict with keys {"question", "A", "B", "C", "D", "answer"} output: a tuple of (prompt, response). """ candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in CHOICES if ch in example] return "".join([example["question"]] + candidates + [self.answer]), example["answer"] def format_example( self, target_data: dict[str, str], support_set: list[dict[str, str]], subject_name: str ) -> list[dict[str, str]]: r"""Convert dataset examples to messages.""" messages = [] for k in range(len(support_set)): prompt, response = self._parse_example(support_set[k]) messages.append({"role": Role.USER.value, "content": prompt}) messages.append({"role": Role.ASSISTANT.value, "content": response}) prompt, response = self._parse_example(target_data) messages.append({"role": Role.USER.value, "content": prompt}) messages.append({"role": Role.ASSISTANT.value, "content": response}) messages[0]["content"] = self.system.format(subject=subject_name) + messages[0]["content"] return messages eval_templates: dict[str, "EvalTemplate"] = {} def _register_eval_template(name: str, system: str, choice: str, answer: str) -> None: eval_templates[name] = 
EvalTemplate(system=system, choice=choice, answer=answer) def get_eval_template(name: str) -> "EvalTemplate": eval_template = eval_templates.get(name, None) assert eval_template is not None, f"Template {name} does not exist." return eval_template _register_eval_template( name="en", system="The following are multiple choice questions (with answers) about {subject}.\n\n", choice="\n{choice}. {content}", answer="\nAnswer:", ) _register_eval_template( name="zh", system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n", choice="\n{choice}. {content}", answer="\n答案:", )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/eval/evaluator.py
src/llamafactory/eval/evaluator.py
# Copyright 2025 the LlamaFactory team. # # This code is inspired by the Dan's test library. # https://github.com/hendrycks/test/blob/master/evaluate_flan.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # MIT License # # Copyright (c) 2020 Dan Hendrycks # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import json import os from typing import TYPE_CHECKING, Any, Optional import numpy as np import torch from datasets import load_dataset from tqdm import tqdm, trange from transformers.utils import cached_file from ..data import get_template_and_fix_tokenizer from ..extras.constants import CHOICES, SUBJECTS from ..hparams import get_eval_args from ..model import load_model, load_tokenizer from .template import get_eval_template if TYPE_CHECKING: from numpy.typing import NDArray class Evaluator: def __init__(self, args: Optional[dict[str, Any]] = None) -> None: self.model_args, self.data_args, self.eval_args, finetuning_args = get_eval_args(args) self.tokenizer = load_tokenizer(self.model_args)["tokenizer"] self.tokenizer.padding_side = "right" # avoid overflow issue in batched inference for llama2 self.template = get_template_and_fix_tokenizer(self.tokenizer, self.data_args) self.model = load_model(self.tokenizer, self.model_args, finetuning_args) self.eval_template = get_eval_template(self.eval_args.lang) self.choice_inputs = [self.tokenizer.encode(ch, add_special_tokens=False)[-1] for ch in CHOICES] @torch.inference_mode() def batch_inference(self, batch_input: dict[str, "torch.Tensor"]) -> list[str]: logits = self.model(**batch_input).logits lengths = torch.sum(batch_input["attention_mask"], dim=-1) word_probs = torch.stack([logits[i, lengths[i] - 1] for i in range(len(lengths))], dim=0) choice_probs = torch.nn.functional.softmax(word_probs[:, self.choice_inputs], dim=-1).detach() return [chr(ord("A") + offset.item()) for offset in torch.argmax(choice_probs, dim=-1)] def eval(self) -> None: eval_task = self.eval_args.task.split("_")[0] eval_split = self.eval_args.task.split("_")[1] mapping = cached_file( path_or_repo_id=os.path.join(self.eval_args.task_dir, eval_task), filename="mapping.json", cache_dir=self.model_args.cache_dir, token=self.model_args.hf_hub_token, ) with open(mapping, encoding="utf-8") as f: categorys: dict[str, dict[str, str]] = json.load(f) 
category_corrects = {subj: np.array([], dtype="bool") for subj in SUBJECTS} pbar = tqdm(categorys.keys(), desc="Processing subjects", position=0) results = {} for subject in pbar: dataset = load_dataset( path=os.path.join(self.eval_args.task_dir, eval_task), name=subject, cache_dir=self.model_args.cache_dir, download_mode=self.eval_args.download_mode, token=self.model_args.hf_hub_token, trust_remote_code=self.model_args.trust_remote_code, ) pbar.set_postfix_str(categorys[subject]["name"]) inputs, outputs, labels = [], [], [] for i in trange(len(dataset[eval_split]), desc="Formatting batches", position=1, leave=False): support_set = ( dataset["train"].shuffle().select(range(min(self.eval_args.n_shot, len(dataset["train"])))) ) messages = self.eval_template.format_example( target_data=dataset[eval_split][i], support_set=support_set, subject_name=categorys[subject]["name"], ) input_ids, _ = self.template.encode_oneturn(tokenizer=self.tokenizer, messages=messages) inputs.append({"input_ids": input_ids, "attention_mask": [1] * len(input_ids)}) labels.append(messages[-1]["content"]) for i in trange( 0, len(inputs), self.eval_args.batch_size, desc="Predicting batches", position=1, leave=False ): batch_input = self.tokenizer.pad( inputs[i : i + self.eval_args.batch_size], return_attention_mask=True, return_tensors="pt" ).to(self.model.device) preds = self.batch_inference(batch_input) outputs += preds corrects = np.array(outputs) == np.array(labels) category_name = categorys[subject]["category"] category_corrects[category_name] = np.concatenate([category_corrects[category_name], corrects], axis=0) category_corrects["Average"] = np.concatenate([category_corrects["Average"], corrects], axis=0) results[subject] = {str(i): outputs[i] for i in range(len(outputs))} pbar.close() self._save_results(category_corrects, results) def _save_results(self, category_corrects: dict[str, "NDArray"], results: dict[str, dict[int, str]]) -> None: score_info = "\n".join( [ f"{category_name:>15}: 
{100 * np.mean(category_correct):.2f}" for category_name, category_correct in category_corrects.items() if len(category_correct) ] ) print(score_info) if self.eval_args.save_dir is not None: os.makedirs(self.eval_args.save_dir, exist_ok=False) with open(os.path.join(self.eval_args.save_dir, "results.json"), "w", encoding="utf-8", newline="\n") as f: json.dump(results, f, indent=2) with open(os.path.join(self.eval_args.save_dir, "results.log"), "w", encoding="utf-8", newline="\n") as f: f.write(score_info) def run_eval() -> None: Evaluator().eval()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/eval/__init__.py
src/llamafactory/eval/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/data_utils.py
src/llamafactory/data/data_utils.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from enum import Enum, unique from typing import TYPE_CHECKING, Any, Optional, TypedDict, Union import fsspec from datasets import DatasetDict, concatenate_datasets, interleave_datasets from ..extras import logging if TYPE_CHECKING: from datasets import Dataset, IterableDataset from ..hparams import DataArguments logger = logging.get_logger(__name__) SLOTS = list[Union[str, set[str], dict[str, str]]] @unique class Role(str, Enum): USER = "user" ASSISTANT = "assistant" SYSTEM = "system" FUNCTION = "function" OBSERVATION = "observation" class DatasetModule(TypedDict): train_dataset: Optional[Union["Dataset", "IterableDataset"]] eval_dataset: Optional[Union["Dataset", "IterableDataset", dict[str, "Dataset"]]] def merge_dataset( all_datasets: list[Union["Dataset", "IterableDataset"]], data_args: "DataArguments", seed: int ) -> Union["Dataset", "IterableDataset"]: r"""Merge multiple datasets to a unified dataset.""" if len(all_datasets) == 1: return all_datasets[0] elif data_args.mix_strategy == "concat": if data_args.streaming: logger.warning_rank0_once("The samples between different datasets will not be mixed in streaming mode.") return concatenate_datasets(all_datasets) elif data_args.mix_strategy.startswith("interleave"): if not data_args.streaming: logger.warning_rank0_once("We recommend using `mix_strategy=concat` in non-streaming mode.") return interleave_datasets( 
datasets=all_datasets, probabilities=data_args.interleave_probs, seed=seed, stopping_strategy="first_exhausted" if data_args.mix_strategy.endswith("under") else "all_exhausted", ) else: raise ValueError(f"Unknown mixing strategy: {data_args.mix_strategy}.") def split_dataset( dataset: Optional[Union["Dataset", "IterableDataset"]], eval_dataset: Optional[Union["Dataset", "IterableDataset", dict[str, "Dataset"]]], data_args: "DataArguments", seed: int, ) -> tuple[dict, dict]: r"""Split the dataset and returns two dicts containing train set and validation set. Support both map dataset and iterable dataset. Returns: train_dict: Dictionary containing training data with key "train" eval_dict: Dictionary containing evaluation data with keys "validation" or "validation_{name}" """ if eval_dataset is not None and data_args.val_size > 1e-6: raise ValueError("Cannot specify `val_size` if `eval_dataset` is not None.") # the train and eval better to in dict dtype and separately return for cpode clearly and good handle outside train_dict, eval_dict = {}, {} if dataset is not None: if data_args.streaming: dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=seed) if data_args.val_size > 1e-6: if data_args.streaming: eval_dict["validation"] = dataset.take(int(data_args.val_size)) train_dict["train"] = dataset.skip(int(data_args.val_size)) else: val_size = int(data_args.val_size) if data_args.val_size > 1 else data_args.val_size split_result = dataset.train_test_split(test_size=val_size, seed=seed) train_dict["train"] = split_result["train"] eval_dict["validation"] = split_result["test"] else: train_dict["train"] = dataset if eval_dataset is not None: if isinstance(eval_dataset, dict): for name, data in eval_dataset.items(): eval_dict[f"validation_{name}"] = data else: if data_args.streaming: eval_dataset = eval_dataset.shuffle(buffer_size=data_args.buffer_size, seed=seed) eval_dict["validation"] = eval_dataset return train_dict, eval_dict def 
get_dataset_module(dataset: Union["Dataset", "DatasetDict"]) -> "DatasetModule": r"""Convert dataset or dataset dict to dataset module.""" dataset_module: DatasetModule = {} if isinstance(dataset, DatasetDict): # dataset dict if "train" in dataset: dataset_module["train_dataset"] = dataset["train"] if "validation" in dataset: dataset_module["eval_dataset"] = dataset["validation"] else: eval_dataset = {} for key in dataset.keys(): if key.startswith("validation_"): eval_dataset[key[len("validation_") :]] = dataset[key] if len(eval_dataset): dataset_module["eval_dataset"] = eval_dataset else: # single dataset dataset_module["train_dataset"] = dataset return dataset_module def setup_fs(path: str, anon: bool = False) -> "fsspec.AbstractFileSystem": r"""Set up a filesystem object based on the path protocol.""" storage_options = {"anon": anon} if anon else {} if path.startswith("s3://"): fs = fsspec.filesystem("s3", **storage_options) elif path.startswith(("gs://", "gcs://")): fs = fsspec.filesystem("gcs", **storage_options) else: raise ValueError(f"Unsupported protocol in path: {path}. Use 's3://' or 'gs://'.") if not fs.exists(path): raise ValueError(f"Path does not exist: {path}.") return fs def _read_json_with_fs(fs: "fsspec.AbstractFileSystem", path: str) -> list[Any]: r"""Helper function to read JSON/JSONL files using fsspec.""" with fs.open(path, "r") as f: if path.endswith(".jsonl"): return [json.loads(line) for line in f if line.strip()] else: return json.load(f) def read_cloud_json(cloud_path: str) -> list[Any]: r"""Read a JSON/JSONL file from cloud storage (S3 or GCS). 
Args: cloud_path: str Cloud path in the format: - 's3://bucket-name/file.json' for AWS S3 - 'gs://bucket-name/file.jsonl' or 'gcs://bucket-name/file.jsonl' for Google Cloud Storage """ try: fs = setup_fs(cloud_path, anon=True) # try with anonymous access first except Exception: fs = setup_fs(cloud_path) # try again with credentials # filter out non-JSON files files = [x["Key"] for x in fs.listdir(cloud_path)] if fs.isdir(cloud_path) else [cloud_path] files = filter(lambda file: file.endswith(".json") or file.endswith(".jsonl"), files) if not files: raise ValueError(f"No JSON/JSONL files found in the specified path: {cloud_path}.") return sum([_read_json_with_fs(fs, file) for file in files], [])
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/mm_plugin.py
src/llamafactory/data/mm_plugin.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's Transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llava/processing_llava.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math import os import re from copy import deepcopy from dataclasses import dataclass from io import BytesIO from typing import TYPE_CHECKING, BinaryIO, Literal, NotRequired, Optional, TypedDict, Union import numpy as np import torch import torchaudio from transformers.image_utils import get_image_size, is_valid_image, to_numpy_array from transformers.models.mllama.processing_mllama import ( convert_sparse_cross_attention_mask_to_dense, get_cross_attention_token_mask, ) from typing_extensions import override from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER from ..extras.packages import is_pillow_available, is_pyav_available, is_transformers_version_greater_than if is_pillow_available(): from PIL import Image from PIL.Image import Image as ImageObject if is_pyav_available(): import av if is_transformers_version_greater_than("4.52.0"): from transformers.image_utils import make_flat_list_of_images from transformers.video_utils import make_batched_videos else: from transformers.image_utils import make_batched_videos, make_flat_list_of_images if TYPE_CHECKING: from av.stream import Stream from numpy.typing import NDArray from 
transformers import PreTrainedTokenizer, ProcessorMixin from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor from transformers.image_processing_utils import BaseImageProcessor from transformers.video_processing_utils import BaseVideoProcessor class EncodedImage(TypedDict): path: str | None bytes: bytes | None ImageInput = Union[str, bytes, EncodedImage, BinaryIO, ImageObject] VideoInput = Union[str, BinaryIO, list[list[ImageInput]]] AudioInput = Union[str, BinaryIO, NDArray] class RegularizedImageOutput(TypedDict): images: list[ImageObject] class RegularizedVideoOutput(TypedDict): videos: list[list[ImageObject]] durations: list[float] fps_per_video: NotRequired[list[float]] class RegularizedAudioOutput(TypedDict): audios: list[NDArray] sampling_rates: list[float] class MMProcessor(ProcessorMixin): patch_size: int image_seq_length: int num_additional_image_tokens: int vision_feature_select_strategy: Literal["default", "full"] def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int: pass def _get_paligemma_token_type_ids(imglens: list[int], seqlens: list[int], processor: "MMProcessor") -> list[list[int]]: r"""Get paligemma token type ids for computing loss. It is slightly different with the original token type ids where the prompt part is 0. Returns: batch_token_type_ids: shape (batch_size, seq_length) """ batch_token_type_ids = [] for imglen, seqlen in zip(imglens, seqlens): image_seqlen = imglen * processor.image_seq_length batch_token_type_ids.append([0] * image_seqlen + [1] * (seqlen - image_seqlen)) return batch_token_type_ids def _get_gemma3_token_type_ids(batch_ids: list[list[int]], processor: "MMProcessor"): r"""Get gemma3 token type ids for computing loss. 
Returns: batch_token_type_ids: shape (batch_size, seq_length) """ image_token_id: int = getattr(processor, "image_token_id") batch_token_type_ids = [] for token_ids in batch_ids: token_ids = np.array(token_ids) token_type_ids = np.zeros_like(token_ids) token_type_ids[token_ids == image_token_id] = 1 batch_token_type_ids.append(token_type_ids.tolist()) return batch_token_type_ids def _make_batched_images(images: list["ImageObject"], imglens: list[int]) -> list[list["ImageObject"]]: r"""Make nested list of images.""" batch_images = [] for imglen in imglens: batch_images.append(images[:imglen]) images = images[imglen:] return batch_images def _check_video_is_nested_images(video: "VideoInput") -> bool: r"""Check if the video is nested images.""" return isinstance(video, list) and all(isinstance(frame, (str, BinaryIO, dict, ImageObject)) for frame in video) @dataclass class MMPluginMixin: image_token: str | None video_token: str | None audio_token: str | None expand_mm_tokens: bool = True def _validate_input( self, processor: Optional["MMProcessor"], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], ) -> None: r"""Validate if this model accepts the input modalities.""" image_processor: BaseImageProcessor = getattr(processor, "image_processor", None) video_processor: BaseImageProcessor = getattr( processor, "video_processor", getattr(processor, "image_processor", None) ) feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None) if len(images) != 0 and self.image_token is None: raise ValueError( "This model does not support image input. Please check whether the correct `template` is used." ) if len(videos) != 0 and self.video_token is None: raise ValueError( "This model does not support video input. Please check whether the correct `template` is used." ) if len(audios) != 0 and self.audio_token is None: raise ValueError( "This model does not support audio input. 
Please check whether the correct `template` is used." ) if self.image_token is not None and processor is None: raise ValueError("Processor was not found, please check and update your model file.") if self.image_token is not None and image_processor is None: raise ValueError("Image processor was not found, please check and update your model file.") if self.video_token is not None and video_processor is None: raise ValueError("Video processor was not found, please check and update your model file.") if self.audio_token is not None and feature_extractor is None: raise ValueError("Audio feature extractor was not found, please check and update your model file.") def _validate_messages( self, messages: list[dict[str, str]], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], ): r"""Validate if the number of images, videos and audios match the number of placeholders in messages.""" num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0 for message in messages: num_image_tokens += message["content"].count(IMAGE_PLACEHOLDER) num_video_tokens += message["content"].count(VIDEO_PLACEHOLDER) num_audio_tokens += message["content"].count(AUDIO_PLACEHOLDER) if len(images) != num_image_tokens: raise ValueError( f"The number of images does not match the number of {IMAGE_PLACEHOLDER} tokens in {messages}." ) if len(videos) != num_video_tokens: raise ValueError( f"The number of videos does not match the number of {VIDEO_PLACEHOLDER} tokens in {messages}." ) if len(audios) != num_audio_tokens: raise ValueError( f"The number of audios does not match the number of {AUDIO_PLACEHOLDER} tokens in {messages}." 
) def _preprocess_image( self, image: "ImageObject", image_max_pixels: int, image_min_pixels: int, **kwargs ) -> "ImageObject": r"""Pre-process a single image.""" if (image.width * image.height) > image_max_pixels: resize_factor = math.sqrt(image_max_pixels / (image.width * image.height)) width, height = int(image.width * resize_factor), int(image.height * resize_factor) image = image.resize((width, height)) if (image.width * image.height) < image_min_pixels: resize_factor = math.sqrt(image_min_pixels / (image.width * image.height)) width, height = int(image.width * resize_factor), int(image.height * resize_factor) image = image.resize((width, height)) if image.mode != "RGB": image = image.convert("RGB") return image def _get_video_sample_indices( self, video_stream: "Stream", video_fps: float, video_maxlen: int, **kwargs ) -> list[int]: r"""Compute video sample indices according to fps.""" total_frames = video_stream.frames if total_frames == 0: # infinite video return np.linspace(0, video_maxlen - 1, video_maxlen).astype(np.int32) sample_frames = max(1, math.floor(float(video_stream.duration * video_stream.time_base) * video_fps)) sample_frames = min(total_frames, video_maxlen, sample_frames) return np.linspace(0, total_frames - 1, sample_frames).astype(np.int32) def _regularize_images(self, images: list["ImageInput"], **kwargs) -> "RegularizedImageOutput": r"""Regularize images to avoid error. 
Including reading and pre-processing.""" results = [] for image in images: if isinstance(image, (str, BinaryIO)): image = Image.open(image) elif isinstance(image, bytes): image = Image.open(BytesIO(image)) elif isinstance(image, dict): if image["bytes"] is not None: image = Image.open(BytesIO(image["bytes"])) else: image = Image.open(image["path"]) if not isinstance(image, ImageObject): raise ValueError(f"Expect input is a list of images, but got {type(image)}.") results.append(self._preprocess_image(image, **kwargs)) return {"images": results} def _regularize_videos(self, videos: list["VideoInput"], **kwargs) -> "RegularizedVideoOutput": r"""Regularizes videos to avoid error. Including reading, resizing and converting.""" results = [] durations = [] for video in videos: frames: list[ImageObject] = [] if _check_video_is_nested_images(video): for frame in video: if not is_valid_image(frame) and not isinstance(frame, dict) and not os.path.exists(frame): raise ValueError("Invalid image found in video frames.") frames = video durations.append(len(frames) / kwargs.get("video_fps", 2.0)) else: container = av.open(video, "r") video_stream = next(stream for stream in container.streams if stream.type == "video") sample_indices = self._get_video_sample_indices(video_stream, **kwargs) container.seek(0) for frame_idx, frame in enumerate(container.decode(video_stream)): if frame_idx in sample_indices: frames.append(frame.to_image()) if video_stream.duration is None: durations.append(len(frames) / kwargs.get("video_fps", 2.0)) else: durations.append(float(video_stream.duration * video_stream.time_base)) frames = self._regularize_images(frames, **kwargs)["images"] results.append(frames) return {"videos": results, "durations": durations} def _regularize_audios( self, audios: list["AudioInput"], sampling_rate: float, **kwargs ) -> "RegularizedAudioOutput": r"""Regularizes audios to avoid error. 
Including reading and resampling.""" results, sampling_rates = [], [] for audio in audios: if not isinstance(audio, np.ndarray): audio, sr = torchaudio.load(audio) if audio.shape[0] > 1: audio = audio.mean(dim=0, keepdim=True) if sr != sampling_rate: audio = torchaudio.functional.resample(audio, sr, sampling_rate) audio = audio.squeeze(0).numpy() results.append(audio) sampling_rates.append(sampling_rate) return {"audios": results, "sampling_rates": sampling_rates} def _get_mm_inputs( self, images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: "MMProcessor", imglens: list[int] | None = None, ) -> dict[str, "torch.Tensor"]: r"""Process visual inputs. Returns: (llava and paligemma) pixel_values: tensor with shape (B, C, H, W) Returns: (qwen2-vl) pixel_values: tensor with shape (num_patches, patch_dim) image_grid_thw: tensor with shape (num_images, 3), where the three numbers are time, width, height where num_patches == torch.prod(image_grid_thw) Returns: (mllama) pixel_values: tensor with shape (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width) For example, (2, 1, 4, 3, 560, 560). aspect_ratio_ids: tensor with shape (batch_size, max_num_images). For example, (2, 1). aspect_ratio_mask: tensor with shape (batch_size, max_num_images, max_image_tiles). For example, (2, 1, 4). num_tiles: List[List[int]] with shape (batch_size, num_images_in_batch). For example, (2, 1). 
""" mm_inputs = {} if len(images) != 0: image_processor: BaseImageProcessor = getattr(processor, "image_processor", None) images = self._regularize_images( images, image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768), image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32), )["images"] if imglens is not None: # if imglens are provided, make batched images images = _make_batched_images(images, imglens) image_processor_kwargs = {} if getattr(processor, "image_do_pan_and_scan", False): # gemma3 image processor image_processor_kwargs.update( { "do_pan_and_scan": True, "pan_and_scan_min_crop_size": 256, "pan_and_scan_max_num_crops": 4, "pan_and_scan_min_ratio_to_activate": 1.2, } ) mm_inputs.update(image_processor(images, return_tensors="pt", **image_processor_kwargs)) if len(videos) != 0: video_processor: BaseImageProcessor = getattr( processor, "video_processor", getattr(processor, "image_processor", None) ) videos = self._regularize_videos( videos, image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256), image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16), video_fps=getattr(processor, "video_fps", 2.0), video_maxlen=getattr(processor, "video_maxlen", 128), )["videos"] if "videos" in inspect.signature(video_processor.preprocess).parameters: # for qwen2_vl and video_llava mm_inputs.update(video_processor(images=None, videos=videos, return_tensors="pt")) else: # for llava_next_video mm_inputs.update(video_processor(videos, return_tensors="pt")) if len(audios) != 0: feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None) audios = self._regularize_audios( audios, sampling_rate=getattr(processor, "audio_sampling_rate", 16000), )["audios"] mm_inputs.update( feature_extractor( audios, sampling_rate=getattr(processor, "audio_sampling_rate", 16000), return_attention_mask=True, padding="max_length", return_tensors="pt", ) ) mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask", 
None) # prevent conflicts return mm_inputs @dataclass class BasePlugin(MMPluginMixin): def process_messages( self, messages: list[dict[str, str]], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: Optional["MMProcessor"], ) -> list[dict[str, str]]: r"""Pre-process input messages before tokenization for VLMs.""" self._validate_input(processor, images, videos, audios) return messages def process_token_ids( self, input_ids: list[int], labels: list[int] | None, images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], tokenizer: "PreTrainedTokenizer", processor: Optional["MMProcessor"], ) -> tuple[list[int], list[int] | None]: r"""Pre-process token ids after tokenization for VLMs.""" self._validate_input(processor, images, videos, audios) return input_ids, labels def get_mm_inputs( self, images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], imglens: list[int], vidlens: list[int], audlens: list[int], batch_ids: list[list[int]], processor: Optional["MMProcessor"], ) -> dict[str, Union[list[int], "torch.Tensor"]]: r"""Build batched multimodal inputs for VLMs. 
Arguments: images: a list of image inputs, shape (num_images,) videos: a list of video inputs, shape (num_videos,) audios: a list of audio inputs, shape (num_audios,) imglens: number of images in each sample, shape (batch_size,) vidlens: number of videos in each sample, shape (batch_size,) audlens: number of audios in each sample, shape (batch_size,) batch_ids: token ids of input samples, shape (batch_size, seq_len) processor: a processor for pre-processing images and videos """ self._validate_input(processor, images, videos, audios) return self._get_mm_inputs(images, videos, audios, processor) @dataclass class ErnieVLPlugin(BasePlugin): @override def process_messages( self, messages: list[dict[str, str]], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: Optional["MMProcessor"], ) -> list[dict[str, str]]: self._validate_input(processor, images, videos, audios) self._validate_messages(messages, images, videos, audios) messages = deepcopy(messages) image_processor: BaseImageProcessor = getattr(processor, "image_processor") merge_length: int = getattr(image_processor, "merge_size") ** 2 if self.expand_mm_tokens: mm_inputs = self._get_mm_inputs(images, videos, audios, processor) image_grid_thw = mm_inputs.get("image_grid_thw", []) video_grid_thw = mm_inputs.get("video_grid_thw", []) else: image_grid_thw = [None] * len(images) video_grid_thw = [None] * len(videos) image_idx, video_idx = 0, 0 for message in messages: content = message["content"] image_token = self.image_token or "<|IMAGE_PLACEHOLDER|>" video_token = self.video_token or "<|VIDEO_PLACEHOLDER|>" while IMAGE_PLACEHOLDER in content: image_seqlen = image_grid_thw[image_idx].prod() // merge_length if self.expand_mm_tokens else 1 content = content.replace( IMAGE_PLACEHOLDER, f"Picture {image_idx + 1}:<|IMAGE_START|>{image_token * image_seqlen}<|IMAGE_END|>", 1, ) image_idx += 1 while VIDEO_PLACEHOLDER in content: video_seqlen = video_grid_thw[video_idx].prod() // 
merge_length if self.expand_mm_tokens else 1 content = content.replace( VIDEO_PLACEHOLDER, f"Video {video_idx + 1}:<|VIDEO_START|>{video_token * video_seqlen}<|VIDEO_END|>", 1, ) video_idx += 1 message["content"] = content return messages @dataclass class Gemma3Plugin(BasePlugin): @override def process_messages( self, messages: list[dict[str, str]], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: Optional["MMProcessor"], ) -> list[dict[str, str]]: self._validate_input(processor, images, videos, audios) self._validate_messages(messages, images, videos, audios) num_image_tokens = 0 messages = deepcopy(messages) boi_token: str = getattr(processor, "boi_token") full_image_sequence: str = getattr(processor, "full_image_sequence") image_str = full_image_sequence if self.expand_mm_tokens else boi_token do_pan_and_scan: bool = getattr(processor, "image_do_pan_and_scan", False) if do_pan_and_scan: mm_inputs = self._get_mm_inputs(images, videos, audios, processor) for message in messages: content = message["content"] while IMAGE_PLACEHOLDER in content: if do_pan_and_scan: image_placeholder_str = ( "Here is the original image {{image}} and here are some crops to help you see better " + " ".join(["{{image}}"] * mm_inputs["num_crops"][0][num_image_tokens]) ) else: image_placeholder_str = "{{image}}" content = content.replace(IMAGE_PLACEHOLDER, image_placeholder_str, 1) num_image_tokens += 1 message["content"] = content.replace("{{image}}", image_str) return messages @override def get_mm_inputs( self, images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], imglens: list[int], vidlens: list[int], audlens: list[int], batch_ids: list[list[int]], processor: Optional["MMProcessor"], ) -> dict[str, Union[list[int], "torch.Tensor"]]: self._validate_input(processor, images, videos, audios) mm_inputs = self._get_mm_inputs(images, videos, audios, processor) mm_inputs.pop("num_crops", None) 
mm_inputs["token_type_ids"] = _get_gemma3_token_type_ids(batch_ids, processor) return mm_inputs class Gemma3nPlugin(Gemma3Plugin): @override def process_messages( self, messages: list[dict[str, str]], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: Optional["MMProcessor"], ) -> list[dict[str, str]]: self._validate_input(processor, images, videos, audios) self._validate_messages(messages, images, videos, audios) messages = deepcopy(messages) boi_token: str = getattr(processor, "boi_token") boa_token: str = getattr(processor, "boa_token") full_image_sequence: str = getattr(processor, "full_image_sequence") full_audio_sequence: str = getattr(processor, "full_audio_sequence") image_str = full_image_sequence if self.expand_mm_tokens else boi_token audio_str = full_audio_sequence if self.expand_mm_tokens else boa_token for message in messages: content = message["content"] while IMAGE_PLACEHOLDER in content: content = content.replace(IMAGE_PLACEHOLDER, image_str, 1) while AUDIO_PLACEHOLDER in content: content = content.replace(AUDIO_PLACEHOLDER, audio_str, 1) message["content"] = content return messages @dataclass class InternVLPlugin(BasePlugin): @override def _get_mm_inputs( self, images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: "ProcessorMixin", **kwargs, ) -> dict[str, "torch.Tensor"]: image_processor: BaseImageProcessor = getattr(processor, "image_processor") image_processor_kwargs = {} if getattr(processor, "crop_to_patches", False): image_processor_kwargs.update( { "crop_to_patches": True, "max_patches": 12, "min_patches": 1, } ) mm_inputs = {} image_video_patches = [] if len(images) != 0: images = self._regularize_images( images, image_max_pixels=getattr(processor, "image_max_pixels", 1024 * 1024), image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32), )["images"] if len(videos) != 0: videos = self._regularize_videos( videos, image_max_pixels=getattr(processor, 
"video_max_pixels", 256 * 256), image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16), video_fps=getattr(processor, "video_fps", 2.0), video_maxlen=getattr(processor, "video_maxlen", 128), )["videos"] if len(images) != 0: images = make_flat_list_of_images(images) image_inputs = image_processor(images=images, return_tensors="pt", **image_processor_kwargs) image_num_patches = image_inputs.pop("num_patches") image_pixel_values = image_inputs.pop("pixel_values") image_num_patches_indices = np.cumsum(image_num_patches) if len(videos) != 0: videos = make_batched_videos(videos) num_frames_per_video = [len(video) for video in videos] patch_indices = np.cumsum(num_frames_per_video) image_processor_kwargs["crop_to_patches"] = False video_inputs = image_processor(images=videos, return_tensors="pt", **image_processor_kwargs) video_num_patches = video_inputs.pop("num_patches") video_pixel_values = video_inputs.pop("pixel_values") video_num_patches_indices = np.cumsum(video_num_patches) # NOT SUPPORT IMAGE VIDEO INTERLEAVED if len(images) != 0 and image_pixel_values is not None: for i in range(len(images)): start_index = image_num_patches_indices[i - 1] if i > 0 else 0 end_index = image_num_patches_indices[i] image_video_patches.append(image_pixel_values[start_index:end_index]) if len(videos) != 0 and video_pixel_values is not None: patch_indices_with_prefix = [0] + list(patch_indices) for i in range(len(videos)): current_patch_index = patch_indices_with_prefix[i] end_patch_index = patch_indices_with_prefix[i + 1] start_index = video_num_patches_indices[current_patch_index - 1] if i > 0 else 0 end_index = video_num_patches_indices[end_patch_index - 1] image_video_patches.append(video_pixel_values[start_index:end_index]) if len(images) != 0 or len(videos) != 0: mm_inputs["pixel_values"] = torch.cat(image_video_patches, dim=0) if len(images) != 0: mm_inputs.update({"image_num_patches": image_num_patches}) if len(videos) != 0: mm_inputs.update({"video_patch_indices": 
patch_indices}) mm_inputs.update({"video_num_patches": video_num_patches}) return mm_inputs @override def process_messages( self, messages: list[dict[str, str]], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], processor: Optional["ProcessorMixin"], ) -> list[dict[str, str]]: self._validate_input(processor, images, videos, audios) self._validate_messages(messages, images, videos, audios) num_image_tokens, num_video_tokens = 0, 0 image_seqlen = getattr(processor, "image_seq_length") if self.expand_mm_tokens else 1 messages = deepcopy(messages) mm_inputs = self._get_mm_inputs(images, videos, audios, processor) image_pixel_patch_list = mm_inputs.get("image_num_patches") # pathes of images video_num_patches = mm_inputs.get("video_num_patches") # all patches for frames of videos video_patch_indices = mm_inputs.get("video_patch_indices") # num frames of per video for message in messages: content = message["content"] while IMAGE_PLACEHOLDER in content: content = content.replace( IMAGE_PLACEHOLDER, f"<img>{'<IMG_CONTEXT>' * image_seqlen * image_pixel_patch_list[num_image_tokens]}</img>", 1, ) num_image_tokens += 1 while VIDEO_PLACEHOLDER in content: current_patch_index = video_patch_indices[num_video_tokens - 1] if num_video_tokens > 0 else 0 end_patch_index = video_patch_indices[num_video_tokens] num_patches = list(video_num_patches[current_patch_index:end_patch_index]) video_replaced_prompt = "\n".join( f"Frame{i + 1}: <img>{'<IMG_CONTEXT>' * image_seqlen * num_patches[i]}</img>" for i in range(len(num_patches)) ) content = content.replace(VIDEO_PLACEHOLDER, video_replaced_prompt, 1) num_video_tokens += 1 message["content"] = content return messages @override def get_mm_inputs( self, images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], imglens: list[int], vidlens: list[int], audlens: list[int], batch_ids: list[list[int]], processor: Optional["ProcessorMixin"], ) -> dict[str, Union[list[int], 
"torch.Tensor"]]: self._validate_input(processor, images, videos, audios) mm_inputs = self._get_mm_inputs(images, videos, audios, processor) mm_inputs.pop("image_num_patches", None) mm_inputs.pop("video_patch_indices", None) mm_inputs.pop("video_num_patches", None) return mm_inputs class KimiVLPlugin(BasePlugin): @override def process_messages(self, messages, images, videos, audios, processor): self._validate_input(processor, images, videos, audios) self._validate_messages(messages, images, videos, audios) if self.expand_mm_tokens: mm_inputs = self._get_mm_inputs(images, videos, audios, processor) image_grid_hws = mm_inputs.get("image_grid_hws", []) else: image_grid_hws = [None] * len(images) num_image_tokens = 0 image_processor: BaseImageProcessor = getattr(processor, "image_processor") merge_length = math.prod(image_processor.merge_kernel_size) messages = deepcopy(messages) for message in messages: content = message["content"] while IMAGE_PLACEHOLDER in content: image_seqlen = image_grid_hws[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
true
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/loader.py
src/llamafactory/data/loader.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import TYPE_CHECKING, Literal, Optional, Union import numpy as np from datasets import Dataset, DatasetDict, load_dataset, load_from_disk from ..extras import logging from ..extras.constants import FILEEXT2TYPE from ..extras.misc import check_version, has_tokenized_data from .converter import align_dataset from .data_utils import get_dataset_module, merge_dataset, read_cloud_json, split_dataset from .parser import get_dataset_list from .processor import ( FeedbackDatasetProcessor, PackedSupervisedDatasetProcessor, PairwiseDatasetProcessor, PretrainDatasetProcessor, SupervisedDatasetProcessor, UnsupervisedDatasetProcessor, ) if TYPE_CHECKING: from datasets import Dataset, IterableDataset from transformers import PreTrainedTokenizer, ProcessorMixin, Seq2SeqTrainingArguments from ..hparams import DataArguments, ModelArguments from .data_utils import DatasetModule from .parser import DatasetAttr from .processor import DatasetProcessor from .template import Template logger = logging.get_logger(__name__) def _load_single_dataset( dataset_attr: "DatasetAttr", model_args: "ModelArguments", data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", ) -> Union["Dataset", "IterableDataset"]: r"""Load a single dataset and aligns it to the standard format.""" logger.info_rank0(f"Loading dataset {dataset_attr}...") data_path, data_name, data_dir, data_files = None, None, 
None, None if dataset_attr.load_from in ["hf_hub", "ms_hub", "om_hub"]: data_path = dataset_attr.dataset_name data_name = dataset_attr.subset data_dir = dataset_attr.folder elif dataset_attr.load_from == "script": data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name) data_name = dataset_attr.subset data_dir = dataset_attr.folder elif dataset_attr.load_from == "cloud_file": data_path = dataset_attr.dataset_name elif dataset_attr.load_from == "file": data_files = [] local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name) if os.path.isdir(local_path): # is directory for file_name in os.listdir(local_path): data_files.append(os.path.join(local_path, file_name)) elif os.path.isfile(local_path): # is file data_files.append(local_path) else: raise ValueError(f"File {local_path} not found.") data_path = FILEEXT2TYPE.get(os.path.splitext(data_files[0])[-1][1:], None) if data_path is None: raise ValueError("Allowed file types: {}.".format(",".join(FILEEXT2TYPE.keys()))) if any(data_path != FILEEXT2TYPE.get(os.path.splitext(data_file)[-1][1:], None) for data_file in data_files): raise ValueError("File types should be identical.") else: raise NotImplementedError(f"Unknown load type: {dataset_attr.load_from}.") if dataset_attr.load_from == "ms_hub": check_version("modelscope>=1.14.0", mandatory=True) from modelscope import MsDataset # type: ignore from modelscope.utils.config_ds import MS_DATASETS_CACHE # type: ignore cache_dir = model_args.cache_dir or MS_DATASETS_CACHE dataset = MsDataset.load( dataset_name=data_path, subset_name=data_name, data_dir=data_dir, data_files=data_files, split=dataset_attr.split, cache_dir=cache_dir, token=model_args.ms_hub_token, use_streaming=data_args.streaming, ) if isinstance(dataset, MsDataset): dataset = dataset.to_hf_dataset() elif dataset_attr.load_from == "om_hub": check_version("openmind>=0.8.0", mandatory=True) from openmind import OmDataset # type: ignore from openmind.utils.hub import 
OM_DATASETS_CACHE # type: ignore cache_dir = model_args.cache_dir or OM_DATASETS_CACHE dataset = OmDataset.load_dataset( path=data_path, name=data_name, data_dir=data_dir, data_files=data_files, split=dataset_attr.split, cache_dir=cache_dir, token=model_args.om_hub_token, streaming=data_args.streaming, ) elif dataset_attr.load_from == "cloud_file": dataset = Dataset.from_list(read_cloud_json(data_path), split=dataset_attr.split) else: dataset = load_dataset( path=data_path, name=data_name, data_dir=data_dir, data_files=data_files, split=dataset_attr.split, cache_dir=model_args.cache_dir, token=model_args.hf_hub_token, num_proc=data_args.preprocessing_num_workers, streaming=data_args.streaming and dataset_attr.load_from != "file", ) if data_args.streaming and dataset_attr.load_from == "file": dataset = dataset.to_iterable_dataset(num_shards=training_args.dataloader_num_workers) if dataset_attr.num_samples is not None and not data_args.streaming: target_num = dataset_attr.num_samples indexes = np.random.permutation(len(dataset))[:target_num] # all samples should be included target_num -= len(indexes) if target_num > 0: expand_indexes = np.random.choice(len(dataset), target_num) indexes = np.concatenate((indexes, expand_indexes), axis=0) assert len(indexes) == dataset_attr.num_samples, "Sample num mismatched." 
dataset = dataset.select(indexes) logger.info_rank0(f"Sampled {dataset_attr.num_samples} examples from dataset {dataset_attr}.") if data_args.max_samples is not None: # truncate dataset max_samples = min(data_args.max_samples, len(dataset)) dataset = dataset.select(range(max_samples)) return align_dataset(dataset, dataset_attr, data_args, training_args) def _get_merged_dataset( dataset_names: list[str] | None, model_args: "ModelArguments", data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo", "kto"], return_dict: bool = False, ) -> Union["Dataset", "IterableDataset", dict[str, "Dataset"]] | None: r"""Return the merged datasets in the standard format.""" if dataset_names is None: return None datasets = {} for dataset_name, dataset_attr in zip(dataset_names, get_dataset_list(dataset_names, data_args.dataset_dir)): if (stage == "rm" and dataset_attr.ranking is False) or (stage != "rm" and dataset_attr.ranking is True): raise ValueError("The dataset is not applicable in the current training stage.") datasets[dataset_name] = _load_single_dataset(dataset_attr, model_args, data_args, training_args) if return_dict: return datasets else: return merge_dataset(list(datasets.values()), data_args, seed=training_args.seed) def _get_dataset_processor( data_args: "DataArguments", stage: Literal["pt", "sft", "rm", "ppo", "kto"], template: "Template", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], do_generate: bool = False, ) -> "DatasetProcessor": r"""Return the corresponding dataset processor.""" if stage == "pt": dataset_processor_class = PretrainDatasetProcessor elif stage == "sft" and not do_generate: if data_args.packing: if data_args.neat_packing: # hack datasets to have int32 attention mask from datasets.arrow_writer import OptimizedTypedSequence, TypedSequence def __init__(self, data, **kwargs): return TypedSequence.__init__( self, data, type=kwargs.pop("type", None), 
try_type=kwargs.pop("try_type", None), optimized_int_type=kwargs.pop("optimized_int_type", None), ) OptimizedTypedSequence.__init__ = __init__ dataset_processor_class = PackedSupervisedDatasetProcessor else: dataset_processor_class = SupervisedDatasetProcessor elif stage == "rm": dataset_processor_class = PairwiseDatasetProcessor elif stage == "kto": dataset_processor_class = FeedbackDatasetProcessor else: dataset_processor_class = UnsupervisedDatasetProcessor return dataset_processor_class(template=template, tokenizer=tokenizer, processor=processor, data_args=data_args) def _get_preprocessed_dataset( dataset: Union["Dataset", "IterableDataset"] | None, data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo", "kto"], template: "Template", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"] = None, is_eval: bool = False, ) -> Union["Dataset", "IterableDataset"] | None: r"""Preprocesses the dataset, including format checking and tokenization.""" if dataset is None: return None dataset_processor = _get_dataset_processor( data_args, stage, template, tokenizer, processor, do_generate=(training_args.predict_with_generate and is_eval) ) column_names = list(next(iter(dataset)).keys()) kwargs = {} if not data_args.streaming: kwargs = dict( num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0), desc="Running tokenizer on dataset", ) dataset = dataset.map( dataset_processor.preprocess_dataset, batched=True, batch_size=data_args.preprocessing_batch_size, remove_columns=column_names, **kwargs, ) if training_args.should_log: try: print("eval example:" if is_eval else "training example:") dataset_processor.print_data_example(next(iter(dataset))) except StopIteration: if stage == "pt": raise RuntimeError("Cannot find sufficient samples, consider increasing dataset size.") else: raise RuntimeError("Cannot find valid 
samples, check `data/README.md` for the data format.") return dataset def get_dataset( template: "Template", model_args: "ModelArguments", data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", stage: Literal["pt", "sft", "rm", "ppo", "kto"], tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"] = None, ) -> "DatasetModule": r"""Get the train dataset and optionally gets the evaluation dataset.""" # Load tokenized dataset if path exists if data_args.tokenized_path is not None: if has_tokenized_data(data_args.tokenized_path): logger.warning_rank0("Loading dataset from disk will ignore other data arguments.") tokenized_data = load_from_disk(data_args.tokenized_path) dataset_module = get_dataset_module(tokenized_data) if data_args.streaming: dataset_module["train_dataset"] = dataset_module["train_dataset"].to_iterable_dataset() logger.info_rank0(f"Loaded tokenized dataset from {data_args.tokenized_path}.") return dataset_module if data_args.streaming: raise ValueError("Turn off `streaming` when saving dataset to disk.") # Load and preprocess dataset with training_args.main_process_first(desc="load dataset", local=(not data_args.data_shared_file_system)): dataset = _get_merged_dataset(data_args.dataset, model_args, data_args, training_args, stage) eval_dataset = _get_merged_dataset( data_args.eval_dataset, model_args, data_args, training_args, stage, return_dict=data_args.eval_on_each_dataset, ) with training_args.main_process_first(desc="pre-process dataset", local=(not data_args.data_shared_file_system)): # move front to make sure eval_dataset(if contain or split) can preprocessed appropriately train_dict, eval_dict = split_dataset(dataset, eval_dataset, data_args, seed=training_args.seed) if "train" in train_dict: train_dict["train"] = _get_preprocessed_dataset( train_dict["train"], data_args, training_args, stage, template, tokenizer, processor, is_eval=False ) for key in eval_dict: eval_dict[key] = _get_preprocessed_dataset( 
eval_dict[key], data_args, training_args, stage, template, tokenizer, processor, is_eval=True ) # Combine train and eval dictionaries dataset_dict = DatasetDict({**train_dict, **eval_dict}) if data_args.tokenized_path is not None: # save tokenized dataset to disk if training_args.should_save: dataset_dict.save_to_disk(data_args.tokenized_path) logger.info_rank0(f"Tokenized dataset is saved at {data_args.tokenized_path}.") logger.info_rank0(f"Please launch the training with `tokenized_path: {data_args.tokenized_path}`.") return get_dataset_module(dataset_dict)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/parser.py
src/llamafactory/data/parser.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from dataclasses import dataclass from typing import Any, Literal from huggingface_hub import hf_hub_download from ..extras.constants import DATA_CONFIG from ..extras.misc import use_modelscope, use_openmind @dataclass class DatasetAttr: r"""Dataset attributes.""" # basic configs load_from: Literal["hf_hub", "ms_hub", "om_hub", "script", "file"] dataset_name: str formatting: Literal["alpaca", "sharegpt", "openai"] = "alpaca" ranking: bool = False # extra configs subset: str | None = None split: str = "train" folder: str | None = None num_samples: int | None = None # common columns system: str | None = None tools: str | None = None images: str | None = None videos: str | None = None audios: str | None = None # dpo columns chosen: str | None = None rejected: str | None = None kto_tag: str | None = None # alpaca columns prompt: str | None = "instruction" query: str | None = "input" response: str | None = "output" history: str | None = None # sharegpt columns messages: str | None = "conversations" # sharegpt tags role_tag: str | None = "from" content_tag: str | None = "value" user_tag: str | None = "human" assistant_tag: str | None = "gpt" observation_tag: str | None = "observation" function_tag: str | None = "function_call" system_tag: str | None = "system" def __repr__(self) -> str: return self.dataset_name def set_attr(self, key: str, obj: dict[str, Any], default: Any | 
None = None) -> None: setattr(self, key, obj.get(key, default)) def join(self, attr: dict[str, Any]) -> None: self.set_attr("formatting", attr, default="alpaca") self.set_attr("ranking", attr, default=False) self.set_attr("subset", attr) self.set_attr("split", attr, default="train") self.set_attr("folder", attr) self.set_attr("num_samples", attr) if "columns" in attr: column_names = ["prompt", "query", "response", "history", "messages", "system", "tools"] column_names += ["images", "videos", "audios", "chosen", "rejected", "kto_tag"] for column_name in column_names: self.set_attr(column_name, attr["columns"]) if "tags" in attr: tag_names = ["role_tag", "content_tag"] tag_names += ["user_tag", "assistant_tag", "observation_tag", "function_tag", "system_tag"] for tag in tag_names: self.set_attr(tag, attr["tags"]) def get_dataset_list(dataset_names: list[str] | None, dataset_dir: str | dict) -> list["DatasetAttr"]: r"""Get the attributes of the datasets.""" if dataset_names is None: dataset_names = [] if isinstance(dataset_dir, dict): dataset_info = dataset_dir elif dataset_dir == "ONLINE": dataset_info = None else: if dataset_dir.startswith("REMOTE:"): config_path = hf_hub_download(repo_id=dataset_dir[7:], filename=DATA_CONFIG, repo_type="dataset") else: config_path = os.path.join(dataset_dir, DATA_CONFIG) try: with open(config_path) as f: dataset_info = json.load(f) except Exception as err: if len(dataset_names) != 0: raise ValueError(f"Cannot open {config_path} due to {str(err)}.") dataset_info = None dataset_list: list[DatasetAttr] = [] for name in dataset_names: if dataset_info is None: # dataset_dir is ONLINE load_from = "ms_hub" if use_modelscope() else "om_hub" if use_openmind() else "hf_hub" dataset_attr = DatasetAttr(load_from, dataset_name=name) dataset_list.append(dataset_attr) continue if name not in dataset_info: raise ValueError(f"Undefined dataset {name} in {DATA_CONFIG}.") has_hf_url = "hf_hub_url" in dataset_info[name] has_ms_url = "ms_hub_url" in 
dataset_info[name] has_om_url = "om_hub_url" in dataset_info[name] if has_hf_url or has_ms_url or has_om_url: if has_ms_url and (use_modelscope() or not has_hf_url): dataset_attr = DatasetAttr("ms_hub", dataset_name=dataset_info[name]["ms_hub_url"]) elif has_om_url and (use_openmind() or not has_hf_url): dataset_attr = DatasetAttr("om_hub", dataset_name=dataset_info[name]["om_hub_url"]) else: dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"]) elif "script_url" in dataset_info[name]: dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"]) elif "cloud_file_name" in dataset_info[name]: dataset_attr = DatasetAttr("cloud_file", dataset_name=dataset_info[name]["cloud_file_name"]) else: dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"]) dataset_attr.join(dataset_info[name]) dataset_list.append(dataset_attr) return dataset_list
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/template.py
src/llamafactory/data/template.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from copy import deepcopy from dataclasses import dataclass from typing import TYPE_CHECKING, Optional, Union from typing_extensions import override from ..extras import logging from .data_utils import Role from .formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter from .mm_plugin import get_mm_plugin if TYPE_CHECKING: from transformers import PreTrainedTokenizer from ..hparams import DataArguments from .formatter import SLOTS, Formatter from .mm_plugin import BasePlugin from .tool_utils import FunctionCall logger = logging.get_logger(__name__) @dataclass class Template: format_user: "Formatter" format_assistant: "Formatter" format_system: "Formatter" format_function: "Formatter" format_observation: "Formatter" format_tools: "Formatter" format_prefix: "Formatter" default_system: str stop_words: list[str] thought_words: tuple[str, str] tool_call_words: tuple[str, str] efficient_eos: bool replace_eos: bool replace_jinja_template: bool enable_thinking: Optional[bool] mm_plugin: "BasePlugin" def encode_oneturn( self, tokenizer: "PreTrainedTokenizer", messages: list[dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, ) -> tuple[list[int], list[int]]: r"""Return a single pair of token ids representing prompt and response respectively.""" encoded_messages = self._encode(tokenizer, messages, system, tools) prompt_ids = [] for encoded_ids 
in encoded_messages[:-1]: prompt_ids += encoded_ids response_ids = encoded_messages[-1] return prompt_ids, response_ids def encode_multiturn( self, tokenizer: "PreTrainedTokenizer", messages: list[dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, ) -> list[tuple[list[int], list[int]]]: r"""Return multiple pairs of token ids representing prompts and responses respectively.""" encoded_messages = self._encode(tokenizer, messages, system, tools) return [(encoded_messages[i], encoded_messages[i + 1]) for i in range(0, len(encoded_messages), 2)] def extract_tool(self, content: str) -> Union[str, list["FunctionCall"]]: r"""Extract tool message.""" return self.format_tools.extract(content) def get_stop_token_ids(self, tokenizer: "PreTrainedTokenizer") -> list[int]: r"""Return stop token ids.""" stop_token_ids = {tokenizer.eos_token_id} for token in self.stop_words: stop_token_ids.add(tokenizer.convert_tokens_to_ids(token)) return list(stop_token_ids) def add_thought(self, content: str = "") -> str: r"""Add empty thought to assistant message.""" return f"{self.thought_words[0]}{self.thought_words[1]}" + content def remove_thought(self, content: str) -> str: r"""Remove thought from assistant message.""" pattern = re.compile(f"{re.escape(self.thought_words[0])}(.*?){re.escape(self.thought_words[1])}", re.DOTALL) return re.sub(pattern, "", content).lstrip("\n") def get_thought_word_ids(self, tokenizer: "PreTrainedTokenizer") -> list[int]: r"""Get the token ids of thought words.""" return tokenizer.encode(self.add_thought(), add_special_tokens=False) def _convert_elements_to_ids(self, tokenizer: "PreTrainedTokenizer", elements: "SLOTS") -> list[int]: r"""Convert elements to token ids.""" token_ids = [] for elem in elements: if isinstance(elem, str): if len(elem) != 0: token_ids += tokenizer.encode(elem, add_special_tokens=False) elif isinstance(elem, dict): token_ids += [tokenizer.convert_tokens_to_ids(elem.get("token"))] elif isinstance(elem, set): if 
"bos_token" in elem and tokenizer.bos_token_id is not None: token_ids += [tokenizer.bos_token_id] elif "eos_token" in elem and tokenizer.eos_token_id is not None: token_ids += [tokenizer.eos_token_id] else: raise ValueError(f"Input must be string, set[str] or dict[str, str], got {type(elem)}") return token_ids def _encode( self, tokenizer: "PreTrainedTokenizer", messages: list[dict[str, str]], system: Optional[str], tools: Optional[str], ) -> list[list[int]]: r"""Encode formatted inputs to pairs of token ids. Turn 0: prefix + system + query resp Turn t: query resp. """ system = system or self.default_system encoded_messages = [] for i, message in enumerate(messages): elements = [] if i == 0: elements += self.format_prefix.apply() if system or tools: tool_text = self.format_tools.apply(content=tools)[0] if tools else "" elements += self.format_system.apply(content=(system + tool_text)) if message["role"] == Role.USER: elements += self.format_user.apply(content=message["content"], idx=str(i // 2)) elif message["role"] == Role.ASSISTANT: elements += self.format_assistant.apply(content=message["content"]) elif message["role"] == Role.OBSERVATION: elements += self.format_observation.apply(content=message["content"]) elif message["role"] == Role.FUNCTION: elements += self.format_function.apply( content=message["content"], thought_words=self.thought_words, tool_call_words=self.tool_call_words ) else: raise NotImplementedError("Unexpected role: {}".format(message["role"])) encoded_messages.append(self._convert_elements_to_ids(tokenizer, elements)) return encoded_messages @staticmethod def _add_or_replace_eos_token(tokenizer: "PreTrainedTokenizer", eos_token: str) -> None: r"""Add or replace eos token to the tokenizer.""" if tokenizer.eos_token == eos_token: return is_added = tokenizer.eos_token_id is None num_added_tokens = tokenizer.add_special_tokens({"eos_token": eos_token}) if is_added: logger.info_rank0(f"Add eos token: {tokenizer.eos_token}.") else: 
logger.info_rank0(f"Replace eos token: {tokenizer.eos_token}.") if num_added_tokens > 0: logger.warning_rank0("New tokens have been added, make sure `resize_vocab` is True.") def fix_special_tokens(self, tokenizer: "PreTrainedTokenizer") -> None: r"""Add eos token and pad token to the tokenizer.""" stop_words = self.stop_words if self.replace_eos: if not stop_words: raise ValueError("Stop words are required to replace the EOS token.") self._add_or_replace_eos_token(tokenizer, eos_token=stop_words[0]) stop_words = stop_words[1:] if tokenizer.eos_token_id is None: self._add_or_replace_eos_token(tokenizer, eos_token="<|endoftext|>") if tokenizer.pad_token_id is None: tokenizer.pad_token = tokenizer.eos_token logger.info_rank0(f"Add pad token: {tokenizer.pad_token}") if stop_words: try: num_added_tokens = tokenizer.add_special_tokens( dict(additional_special_tokens=stop_words), replace_additional_special_tokens=False ) except TypeError: num_added_tokens = tokenizer.add_special_tokens(dict(additional_special_tokens=stop_words)) logger.info_rank0("Add {} to stop words.".format(",".join(stop_words))) if num_added_tokens > 0: logger.warning_rank0("New tokens have been added, make sure `resize_vocab` is True.") @staticmethod def _jinja_escape(content: str) -> str: r"""Escape single quotes in content.""" return content.replace("'", r"\'") @staticmethod def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content") -> str: r"""Convert slots to jinja template.""" slot_items = [] for slot in slots: if isinstance(slot, str): slot_pieces = slot.split("{{content}}") if slot_pieces[0]: slot_items.append("'" + Template._jinja_escape(slot_pieces[0]) + "'") if len(slot_pieces) > 1: slot_items.append(placeholder) if slot_pieces[1]: slot_items.append("'" + Template._jinja_escape(slot_pieces[1]) + "'") elif isinstance(slot, set): # do not use {{ eos_token }} since it may be replaced if "bos_token" in slot and tokenizer.bos_token_id is not 
None: slot_items.append("'" + tokenizer.bos_token + "'") elif "eos_token" in slot and tokenizer.eos_token_id is not None: slot_items.append("'" + tokenizer.eos_token + "'") elif isinstance(slot, dict): raise ValueError("Dict is not supported.") return " + ".join(slot_items) def _get_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> str: r"""Return the jinja template.""" prefix = self._convert_slots_to_jinja(self.format_prefix.apply(), tokenizer) system = self._convert_slots_to_jinja(self.format_system.apply(), tokenizer, placeholder="system_message") user = self._convert_slots_to_jinja(self.format_user.apply(), tokenizer) assistant = self._convert_slots_to_jinja(self.format_assistant.apply(), tokenizer) jinja_template = "" if prefix: jinja_template += "{{ " + prefix + " }}" if self.default_system: jinja_template += "{% set system_message = '" + self._jinja_escape(self.default_system) + "' %}" jinja_template += ( "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}" "{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}" "{% if system_message is defined %}{{ " + system + " }}{% endif %}" "{% for message in loop_messages %}" "{% set content = message['content'] %}" "{% if message['role'] == 'user' %}" "{{ " + user + " }}" "{% elif message['role'] == 'assistant' %}" "{{ " + assistant + " }}" "{% endif %}" "{% endfor %}" ) return jinja_template def fix_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> None: r"""Replace the jinja template in the tokenizer.""" if tokenizer.chat_template is None or self.replace_jinja_template: try: tokenizer.chat_template = self._get_jinja_template(tokenizer) except ValueError as e: logger.info_rank0(f"Cannot add this chat template to tokenizer: {e}.") @staticmethod def _convert_slots_to_ollama( slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content" ) -> str: r"""Convert slots to ollama template.""" slot_items = [] for 
slot in slots: if isinstance(slot, str): slot_pieces = slot.split("{{content}}") if slot_pieces[0]: slot_items.append(slot_pieces[0]) if len(slot_pieces) > 1: slot_items.append("{{ " + placeholder + " }}") if slot_pieces[1]: slot_items.append(slot_pieces[1]) elif isinstance(slot, set): # do not use {{ eos_token }} since it may be replaced if "bos_token" in slot and tokenizer.bos_token_id is not None: slot_items.append(tokenizer.bos_token) elif "eos_token" in slot and tokenizer.eos_token_id is not None: slot_items.append(tokenizer.eos_token) elif isinstance(slot, dict): raise ValueError("Dict is not supported.") return "".join(slot_items) def _get_ollama_template(self, tokenizer: "PreTrainedTokenizer") -> str: r"""Return the ollama template.""" prefix = self._convert_slots_to_ollama(self.format_prefix.apply(), tokenizer) system = self._convert_slots_to_ollama(self.format_system.apply(), tokenizer, placeholder=".System") user = self._convert_slots_to_ollama(self.format_user.apply(), tokenizer, placeholder=".Content") assistant = self._convert_slots_to_ollama(self.format_assistant.apply(), tokenizer, placeholder=".Content") return ( f"{prefix}{{{{ if .System }}}}{system}{{{{ end }}}}" f"""{{{{ range .Messages }}}}{{{{ if eq .Role "user" }}}}{user}""" f"""{{{{ else if eq .Role "assistant" }}}}{assistant}{{{{ end }}}}{{{{ end }}}}""" ) def get_ollama_modelfile(self, tokenizer: "PreTrainedTokenizer") -> str: r"""Return the ollama modelfile. TODO: support function calling. 
""" modelfile = "# ollama modelfile auto-generated by llamafactory\n\n" modelfile += f'FROM .\n\nTEMPLATE """{self._get_ollama_template(tokenizer)}"""\n\n' if self.default_system: modelfile += f'SYSTEM """{self.default_system}"""\n\n' for stop_token_id in self.get_stop_token_ids(tokenizer): modelfile += f'PARAMETER stop "{tokenizer.convert_ids_to_tokens(stop_token_id)}"\n' modelfile += "PARAMETER num_ctx 4096\n" return modelfile @dataclass class Llama2Template(Template): r"""A template that fuse the system message to first user message.""" @override def _encode( self, tokenizer: "PreTrainedTokenizer", messages: list[dict[str, str]], system: str, tools: str, ) -> list[list[int]]: system = system or self.default_system encoded_messages = [] for i, message in enumerate(messages): elements = [] system_text = "" if i == 0: elements += self.format_prefix.apply() if system or tools: tool_text = self.format_tools.apply(content=tools)[0] if tools else "" system_text = self.format_system.apply(content=(system + tool_text))[0] if message["role"] == Role.USER: elements += self.format_user.apply(content=system_text + message["content"]) elif message["role"] == Role.ASSISTANT: elements += self.format_assistant.apply(content=message["content"]) elif message["role"] == Role.OBSERVATION: elements += self.format_observation.apply(content=message["content"]) elif message["role"] == Role.FUNCTION: elements += self.format_function.apply(content=message["content"]) else: raise NotImplementedError("Unexpected role: {}".format(message["role"])) encoded_messages.append(self._convert_elements_to_ids(tokenizer, elements)) return encoded_messages def _get_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> str: prefix = self._convert_slots_to_jinja(self.format_prefix.apply(), tokenizer) system_message = self._convert_slots_to_jinja( self.format_system.apply(), tokenizer, placeholder="system_message" ) user_message = self._convert_slots_to_jinja(self.format_user.apply(), tokenizer) 
assistant_message = self._convert_slots_to_jinja(self.format_assistant.apply(), tokenizer) jinja_template = "" if prefix: jinja_template += "{{ " + prefix + " }}" if self.default_system: jinja_template += "{% set system_message = '" + self._jinja_escape(self.default_system) + "' %}" jinja_template += ( "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}" "{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}" "{% for message in loop_messages %}" "{% if loop.index0 == 0 and system_message is defined %}" "{% set content = " + system_message + " + message['content'] %}" "{% else %}{% set content = message['content'] %}{% endif %}" "{% if message['role'] == 'user' %}" "{{ " + user_message + " }}" "{% elif message['role'] == 'assistant' %}" "{{ " + assistant_message + " }}" "{% endif %}" "{% endfor %}" ) return jinja_template @dataclass class ReasoningTemplate(Template): r"""A template that add thought to assistant message.""" @override def encode_oneturn( self, tokenizer: "PreTrainedTokenizer", messages: list[dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, ) -> tuple[list[int], list[int]]: messages = deepcopy(messages) for i in range(1, len(messages) - 2, 2): messages[i]["content"] = self.remove_thought(messages[i]["content"]) if self.enable_thinking is False: # remove all cot messages[-1]["content"] = self.remove_thought(messages[-1]["content"]) prompt_ids, response_ids = super().encode_oneturn(tokenizer, messages, system, tools) if ( self.thought_words[0].strip() not in messages[-1]["content"] and self.thought_words[1].strip() not in messages[-1]["content"] ): # add empty cot if not self.enable_thinking: # do not compute loss prompt_ids += self.get_thought_word_ids(tokenizer) else: # do compute loss response_ids = self.get_thought_word_ids(tokenizer) + response_ids return prompt_ids, response_ids @override def encode_multiturn( self, tokenizer: 
"PreTrainedTokenizer", messages: list[dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, ) -> list[tuple[list[int], list[int]]]: messages = deepcopy(messages) if self.enable_thinking is False: # remove all cot for i in range(1, len(messages), 2): messages[i]["content"] = self.remove_thought(messages[i]["content"]) encoded_messages = self._encode(tokenizer, messages, system, tools) for i in range(0, len(messages), 2): if ( self.thought_words[0].strip() not in messages[i + 1]["content"] and self.thought_words[1].strip() not in messages[i + 1]["content"] ): # add empty cot if not self.enable_thinking: # do not compute loss encoded_messages[i] += self.get_thought_word_ids(tokenizer) else: # do compute loss encoded_messages[i + 1] = self.get_thought_word_ids(tokenizer) + encoded_messages[i + 1] return [(encoded_messages[i], encoded_messages[i + 1]) for i in range(0, len(encoded_messages), 2)] TEMPLATES: dict[str, "Template"] = {} def register_template( name: str, format_user: Optional["Formatter"] = None, format_assistant: Optional["Formatter"] = None, format_system: Optional["Formatter"] = None, format_function: Optional["Formatter"] = None, format_observation: Optional["Formatter"] = None, format_tools: Optional["Formatter"] = None, format_prefix: Optional["Formatter"] = None, default_system: str = "", stop_words: Optional[list[str]] = None, thought_words: Optional[tuple[str, str]] = None, tool_call_words: Optional[tuple[str, str]] = None, efficient_eos: bool = False, replace_eos: bool = False, replace_jinja_template: bool = False, enable_thinking: Optional[bool] = True, mm_plugin: "BasePlugin" = get_mm_plugin(name="base"), template_class: type["Template"] = Template, ) -> None: r"""Register a chat template. 
To add the following chat template: ``` <s><user>user prompt here <model>model response here</s> <user>user prompt here <model>model response here</s> ``` The corresponding code should be: ``` register_template( name="custom", format_user=StringFormatter(slots=["<user>{{content}}\n<model>"]), format_assistant=StringFormatter(slots=["{{content}}</s>\n"]), format_prefix=EmptyFormatter("<s>"), ) ``` """ if name in TEMPLATES: raise ValueError(f"Template {name} already exists.") default_slots = ["{{content}}"] if efficient_eos else ["{{content}}", {"eos_token"}] default_user_formatter = StringFormatter(slots=["{{content}}"]) default_assistant_formatter = StringFormatter(slots=default_slots) if format_assistant is not None: default_function_formatter = FunctionFormatter(slots=format_assistant.slots, tool_format="default") else: default_function_formatter = FunctionFormatter(slots=default_slots, tool_format="default") default_tool_formatter = ToolFormatter(tool_format="default") default_prefix_formatter = EmptyFormatter() TEMPLATES[name] = template_class( format_user=format_user or default_user_formatter, format_assistant=format_assistant or default_assistant_formatter, format_system=format_system or default_user_formatter, format_function=format_function or default_function_formatter, format_observation=format_observation or format_user or default_user_formatter, format_tools=format_tools or default_tool_formatter, format_prefix=format_prefix or default_prefix_formatter, default_system=default_system, stop_words=stop_words or [], thought_words=thought_words or ("<think>\n", "\n</think>\n\n"), tool_call_words=tool_call_words or ("<tool_call>", "</tool_call>"), efficient_eos=efficient_eos, replace_eos=replace_eos, replace_jinja_template=replace_jinja_template, enable_thinking=enable_thinking, mm_plugin=mm_plugin, ) def parse_template(tokenizer: "PreTrainedTokenizer") -> "Template": r"""Extract a chat template from the tokenizer.""" def find_diff(short_str: str, long_str: 
str) -> str: i, j = 0, 0 diff = "" while i < len(short_str) and j < len(long_str): if short_str[i] == long_str[j]: i += 1 j += 1 else: diff += long_str[j] j += 1 return diff prefix = tokenizer.decode(tokenizer.encode("")) messages = [{"role": "system", "content": "{{content}}"}] system_slot = tokenizer.apply_chat_template(messages, add_generation_prompt=False, tokenize=False)[len(prefix) :] messages = [{"role": "system", "content": ""}, {"role": "user", "content": "{{content}}"}] user_slot_empty_system = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) user_slot_empty_system = user_slot_empty_system[len(prefix) :] messages = [{"role": "user", "content": "{{content}}"}] user_slot = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) user_slot = user_slot[len(prefix) :] messages = [{"role": "user", "content": "{{content}}"}, {"role": "assistant", "content": "{{content}}"}] assistant_slot = tokenizer.apply_chat_template(messages, add_generation_prompt=False, tokenize=False) assistant_slot = assistant_slot[len(prefix) + len(user_slot) :] template_class = ReasoningTemplate if "<think>" in assistant_slot else Template assistant_slot = assistant_slot.replace("<think>", "").replace("</think>", "").lstrip("\n") # remove thought tags if len(user_slot) > len(user_slot_empty_system): default_system = find_diff(user_slot_empty_system, user_slot) sole_system = system_slot.replace("{{content}}", default_system, 1) user_slot = user_slot[len(sole_system) :] else: # if defaut_system is empty, user_slot_empty_system will be longer than user_slot default_system = "" return template_class( format_user=StringFormatter(slots=[user_slot]), format_assistant=StringFormatter(slots=[assistant_slot]), format_system=StringFormatter(slots=[system_slot]), format_function=FunctionFormatter(slots=[assistant_slot], tool_format="default"), format_observation=StringFormatter(slots=[user_slot]), 
format_tools=ToolFormatter(tool_format="default"), format_prefix=EmptyFormatter(slots=[prefix]) if prefix else EmptyFormatter(), default_system=default_system, stop_words=[], thought_words=("<think>\n", "\n</think>\n\n"), tool_call_words=("<tool_call>", "</tool_call>"), efficient_eos=False, replace_eos=False, replace_jinja_template=False, enable_thinking=True, mm_plugin=get_mm_plugin(name="base"), ) def get_template_and_fix_tokenizer(tokenizer: "PreTrainedTokenizer", data_args: "DataArguments") -> "Template": r"""Get chat template and fixes the tokenizer.""" if data_args.template is None: if isinstance(tokenizer.chat_template, str): logger.warning_rank0("`template` was not specified, try parsing the chat template from the tokenizer.") template = parse_template(tokenizer) else: logger.warning_rank0("`template` was not specified, use `empty` template.") template = TEMPLATES["empty"] # placeholder else: if data_args.template not in TEMPLATES: raise ValueError(f"Template {data_args.template} does not exist.") template = TEMPLATES[data_args.template] if data_args.train_on_prompt and template.efficient_eos: raise ValueError("Current template does not support `train_on_prompt`.") if data_args.tool_format is not None: logger.info_rank0(f"Using tool format: {data_args.tool_format}.") default_slots = ["{{content}}"] if template.efficient_eos else ["{{content}}", {"eos_token"}] template.format_function = FunctionFormatter(slots=default_slots, tool_format=data_args.tool_format) template.format_tools = ToolFormatter(tool_format=data_args.tool_format) if data_args.default_system is not None: logger.info_rank0(f"Using default system message: {data_args.default_system}.") template.default_system = data_args.default_system if isinstance(template, ReasoningTemplate): logger.warning_rank0( "You are using reasoning template, " "please add `_nothink` suffix if the model is not a reasoning model. 
" "e.g., qwen3_vl_nothink" ) template.enable_thinking = data_args.enable_thinking template.fix_special_tokens(tokenizer) template.fix_jinja_template(tokenizer) return template register_template( name="alpaca", format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n\n### Response:\n"]), format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}, "\n\n"]), default_system=( "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n" ), replace_jinja_template=True, ) register_template( name="aquila", format_user=StringFormatter(slots=["Human: {{content}}###Assistant:"]), format_assistant=StringFormatter(slots=["{{content}}###"]), format_system=StringFormatter(slots=["System: {{content}}###"]), default_system=( "A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions." ), stop_words=["</s>"], ) register_template( name="atom", format_user=StringFormatter( slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"] ), format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]), ) register_template( name="baichuan", format_user=StringFormatter(slots=[{"token": "<reserved_102>"}, "{{content}}", {"token": "<reserved_103>"}]), efficient_eos=True, ) register_template( name="baichuan2", format_user=StringFormatter(slots=["<reserved_106>{{content}}<reserved_107>"]), efficient_eos=True, ) register_template( name="bailing", format_user=StringFormatter(slots=["<role>HUMAN</role>{{content}}<role>ASSISTANT</role>"]), format_system=StringFormatter(slots=["<role>SYSTEM</role>{{content}}"]), format_observation=StringFormatter(slots=["<role>OBSERVATION</role>{{content}}<role>ASSISTANT</role>"]), stop_words=["<|endoftext|>"], efficient_eos=True, ) register_template( name="bailing_v2", 
format_user=StringFormatter(slots=["<role>HUMAN</role>{{content}}<|role_end|><role>ASSISTANT</role>"]), format_system=StringFormatter(slots=["<role>SYSTEM</role>{{content}}<|role_end|>"]), format_assistant=StringFormatter(slots=["{{content}}<|role_end|>"]), format_observation=StringFormatter( slots=[ "<role>OBSERVATION</role>\n<tool_response>\n{{content}}\n</tool_response><|role_end|><role>ASSISTANT</role>" ] ), format_function=FunctionFormatter(slots=["{{content}}<|role_end|>"], tool_format="ling"), format_tools=ToolFormatter(tool_format="ling"), stop_words=["<|endoftext|>"], efficient_eos=True, ) register_template( name="belle", format_user=StringFormatter(slots=["Human: {{content}}\n\nBelle: "]), format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}, "\n\n"]), format_prefix=EmptyFormatter(slots=[{"bos_token"}]), ) register_template( name="bluelm", format_user=StringFormatter(slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}]), ) register_template( name="breeze", format_user=StringFormatter(slots=["[INST] {{content}} [/INST] "]), format_prefix=EmptyFormatter(slots=[{"bos_token"}]), efficient_eos=True, ) register_template( name="chatglm2", format_user=StringFormatter(slots=["[Round {{idx}}]\n\n问:{{content}}\n\n答:"]), format_prefix=EmptyFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}]), efficient_eos=True, ) register_template( name="chatglm3", format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]), format_assistant=StringFormatter(slots=["\n", "{{content}}"]), format_system=StringFormatter(slots=[{"token": "<|system|>"}, "\n", "{{content}}"]), format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"), format_observation=StringFormatter( slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}] ), format_tools=ToolFormatter(tool_format="glm4"), format_prefix=EmptyFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}]), 
stop_words=["<|user|>", "<|observation|>"], efficient_eos=True, ) register_template( name="chatml", format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]), format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), stop_words=["<|im_end|>", "<|im_start|>"], replace_eos=True, replace_jinja_template=True, ) # copied from chatml template register_template( name="chatml_de",
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
true
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/tool_utils.py
src/llamafactory/data/tool_utils.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re from abc import ABC, abstractmethod from dataclasses import dataclass from datetime import datetime from typing import Any, NamedTuple, Union from typing_extensions import override class FunctionCall(NamedTuple): name: str arguments: str DEFAULT_TOOL_PROMPT = ( "You have access to the following tools:\n{tool_text}" "Use the following format if using a tool:\n" "```\n" "Action: tool name (one of [{tool_names}])\n" "Action Input: the input to the tool, in a JSON format representing the kwargs " """(e.g. ```{{"input": "hello world", "num_beams": 5}}```)\n""" "```\n" ) GLM4_TOOL_PROMPT = ( "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的," "你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{tool_text}" ) GLM4_MOE_TOOL_PROMPT = ( "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n" "You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}" "\n</tools>\n\nFor each function call, output the function name and arguments within the following XML format:" "\n<tool_call>{{function-name}}" "\n<arg_key>{{arg-key-1}}</arg_key>" "\n<arg_value>{{arg-value-1}}</arg_value>" "\n<arg_key>{{arg-key-2}}</arg_key>" "\n<arg_value>{{arg-value-2}}</arg_value>" "\n...\n</tool_call>\n" ) LLAMA3_TOOL_PROMPT = ( "Cutting Knowledge Date: December 2023\nToday Date: {date}\n\n" "You have access to the following functions. 
To call a function, please respond with JSON for a function call. " """Respond in the format {{"name": function name, "parameters": dictionary of argument name and its value}}. """ "Do not use variables.\n\n{tool_text}" ) MINIMAX_M1_TOOL_PROMPT = ( "You are provided with these tools:\n<tools>\n{tool_text}</tools>\n\n" "If you need to call tools, please respond with <tool_calls></tool_calls> XML tags, and provide tool-name and " "json-object of arguments, following the format below:\n<tool_calls>\n" """{{"name": <tool-name-1>, "arguments": <args-json-object-1>}}\n...\n</tool_calls>""" ) MINIMAX_M2_TOOL_PROMPT = ( "\n\n# Tools\n\nYou may call one or more tools to assist with the user query.\n" "Here are the tools available in JSONSchema format:\n\n<tools>\n{tool_text}</tools>\n\n" "When making tool calls, use XML format to invoke tools and pass parameters:\n" """\n<minimax:tool_call>\n<invoke name="tool-name-1">\n<parameter name="param-key-1">param-value-1</parameter>\n""" """<parameter name="param-key-2">param-value-2</parameter>\n...\n</invoke>\n</minimax:tool_call>""" ) QWEN_TOOL_PROMPT = ( "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n" "You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}" "\n</tools>\n\nFor each function call, return a json object with function name and arguments within " """<tool_call></tool_call> XML tags:\n<tool_call>\n{{"name": <function-name>, """ """"arguments": <args-json-object>}}\n</tool_call>""" ) SEED_TOOL_PROMPT = ( "system\nYou are Doubao, a helpful AI assistant. You may call one or more functions to assist with the user query." "Tool List:\nYou are authorized to use the following tools (described in JSON Schema format). 
Before performing " "any task, you must decide how to call them based on the descriptions and parameters of these tools.{tool_text}\n" "工具调用请遵循如下格式:\n<seed:tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1" "</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple " "lines</parameter>\n</function>\n</seed:tool_call>\n" ) LING_TOOL_PROMPT = ( "# Tools\n\nYou may call one or more functions to assist with the user query.\n\n" "You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}" "\n</tools>\n\nFor each function call, return a json object with function name and arguments within " """<tool_call></tool_call> XML tags:\n<tool_call>\n{{"name": <function-name>, """ """"arguments": <args-json-object>}}\n</tool_call>""" ) @dataclass class ToolUtils(ABC): """Base class for tool utilities.""" @staticmethod @abstractmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: r"""Generate the system message describing all the available tools.""" ... @staticmethod @abstractmethod def function_formatter(functions: list["FunctionCall"]) -> str: r"""Generate the assistant message including all the tool calls.""" ... @staticmethod @abstractmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: r"""Extract all the function calls from the assistant message. It should be an inverse function of `function_formatter`. """ ... 
class DefaultToolUtils(ToolUtils): r"""Default tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" tool_names = [] for tool in tools: tool = tool.get("function", "") if tool.get("type") == "function" else tool param_text = "" for name, param in tool["parameters"]["properties"].items(): required, enum, items = "", "", "" if name in tool["parameters"].get("required", []): required = ", required" if param.get("enum", None): enum = ", should be one of [{}]".format(", ".join(param["enum"])) if param.get("items", None): items = ", where each item should be {}".format(param["items"].get("type", "")) param_text += " - {name} ({type}{required}): {desc}{enum}{items}\n".format( name=name, type=param.get("type", ""), required=required, desc=param.get("description", ""), enum=enum, items=items, ) tool_text += "> Tool Name: {name}\nTool Description: {desc}\nTool Args:\n{args}\n".format( name=tool["name"], desc=tool.get("description", ""), args=param_text ) tool_names.append(tool["name"]) return DEFAULT_TOOL_PROMPT.format(tool_text=tool_text, tool_names=", ".join(tool_names)) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: return "\n".join([f"Action: {name}\nAction Input: {arguments}" for name, arguments in functions]) @override @staticmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: regex = re.compile(r"Action:\s*([a-zA-Z0-9_]+)\s*Action Input:\s*(.+?)(?=\s*Action:|\s*$)", re.DOTALL) action_match: list[tuple[str, str]] = re.findall(regex, content) if not action_match: return content results = [] for match in action_match: tool_name = match[0].strip() tool_input = match[1].strip().strip('"').strip("```") try: arguments = json.loads(tool_input) results.append(FunctionCall(tool_name, json.dumps(arguments, ensure_ascii=False))) except json.JSONDecodeError: return content return results class GLM4ToolUtils(ToolUtils): r"""GLM-4 tool using template.""" 
@override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" for tool in tools: tool = tool.get("function", "") if tool.get("type") == "function" else tool tool_text += "\n\n## {name}\n\n{body}\n在调用上述函数时,请使用 Json 格式表示调用的参数。".format( name=tool["name"], body=json.dumps(tool, indent=4, ensure_ascii=False) ) return GLM4_TOOL_PROMPT.format(tool_text=tool_text) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: if len(functions) > 1: raise ValueError("GLM-4 does not support parallel functions.") return f"{functions[0].name}\n{functions[0].arguments}" @override @staticmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: if "\n" not in content: return content tool_name, tool_input = content.split("\n", maxsplit=1) try: arguments = json.loads(tool_input.strip()) except json.JSONDecodeError: return content return [FunctionCall(tool_name, json.dumps(arguments, ensure_ascii=False))] class Llama3ToolUtils(ToolUtils): r"""Llama 3.x tool using template with `tools_in_user_message=False`. 
Reference: https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling """ @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: date = datetime.now().strftime("%d %b %Y") tool_text = "" for tool in tools: wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool} tool_text += json.dumps(wrapped_tool, indent=4, ensure_ascii=False) + "\n\n" return LLAMA3_TOOL_PROMPT.format(date=date, tool_text=tool_text) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: function_objects = [{"name": name, "parameters": json.loads(arguments)} for name, arguments in functions] return json.dumps(function_objects[0] if len(function_objects) == 1 else function_objects, ensure_ascii=False) @override @staticmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: try: tools = json.loads(content.strip()) except json.JSONDecodeError: return content tools = [tools] if not isinstance(tools, list) else tools try: return [FunctionCall(tool["name"], json.dumps(tool["parameters"], ensure_ascii=False)) for tool in tools] except KeyError: return content class MiniMaxM1ToolUtils(ToolUtils): r"""MiniMax-M1 tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" for tool in tools: tool = tool.get("function", "") if tool.get("type") == "function" else tool tool_text += json.dumps(tool, ensure_ascii=False) + "\n" return MINIMAX_M1_TOOL_PROMPT.format(tool_text=tool_text) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: function_texts = [] for func in functions: name, arguments = func.name, json.loads(func.arguments) function_texts.append(json.dumps({"name": name, "arguments": arguments}, ensure_ascii=False)) return "<tool_calls>\n" + "\n".join(function_texts) + "\n</tool_calls>" @override @staticmethod def tool_extractor(content: str) -> Union[str, 
list["FunctionCall"]]: regex = re.compile(r"<tool_calls>\s*(.+?)\s*</tool_calls>", re.DOTALL) tool_match = re.search(regex, content) if not tool_match: return content tool_calls_content = tool_match.group(1) results = [] for line in tool_calls_content.split("\n"): line = line.strip() if not line: continue try: tool_call = json.loads(line) results.append(FunctionCall(tool_call["name"], json.dumps(tool_call["arguments"], ensure_ascii=False))) except json.JSONDecodeError: continue return results class MiniMaxM2ToolUtils(ToolUtils): r"""MiniMax-M2 tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" for tool in tools: tool = tool.get("function", "") if tool.get("type") == "function" else tool tool_text += "<tool>" + json.dumps(tool, ensure_ascii=False) + "</tool>\n" return MINIMAX_M2_TOOL_PROMPT.format(tool_text=tool_text) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: function_texts = [] for func in functions: name, arguments = func.name, json.loads(func.arguments) prompt = f'<invoke name="{name}">' for key, value in arguments.items(): prompt += f'\n<parameter name="{key}">' if not isinstance(value, str): value = json.dumps(value, ensure_ascii=False) prompt += value + "</parameter>" prompt += "\n</invoke>" function_texts.append(prompt) @override @staticmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: regex = re.compile(r"<minimax:tool_call>\s*(.+?)\s*</minimax:tool_call>", re.DOTALL) tool_match = re.search(regex, content) if not tool_match: return content tool_calls_content = tool_match.group(1) invoke_regex = re.compile(r"<invoke name=\"(.*?)\">(.*?)</invoke>", re.DOTALL) results = [] for func_name, params_block in re.findall(invoke_regex, tool_calls_content): args_dict = {} param_pattern = re.compile(r"<parameter name=\"(.*?)\">(.*?)</parameter>", re.DOTALL) for key, raw_value in re.findall(param_pattern, params_block): value = 
raw_value.strip() try: parsed_value = json.loads(value) except json.JSONDecodeError: parsed_value = raw_value args_dict[key] = parsed_value results.append(FunctionCall(func_name.strip(), json.dumps(args_dict, ensure_ascii=False))) return results class MistralToolUtils(ToolUtils): r"""Mistral v0.3 tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: wrapped_tools = [] for tool in tools: wrapped_tools.append(tool if tool.get("type") == "function" else {"type": "function", "function": tool}) return "[AVAILABLE_TOOLS] " + json.dumps(wrapped_tools, ensure_ascii=False) + "[/AVAILABLE_TOOLS]" @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: return json.dumps( [{"name": name, "arguments": json.loads(arguments)} for name, arguments in functions], ensure_ascii=False ) @override @staticmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: try: tools = json.loads(content.strip()) except json.JSONDecodeError: return content tools = [tools] if not isinstance(tools, list) else tools try: return [FunctionCall(tool["name"], json.dumps(tool["arguments"], ensure_ascii=False)) for tool in tools] except KeyError: return content class QwenToolUtils(ToolUtils): r"""Qwen 2.5 tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" for tool in tools: wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool} tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False) return QWEN_TOOL_PROMPT.format(tool_text=tool_text) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: function_texts = [ json.dumps({"name": name, "arguments": json.loads(arguments)}, ensure_ascii=False) for name, arguments in functions ] return "\n".join([f"<tool_call>\n{text}\n</tool_call>" for text in function_texts]) @override @staticmethod def tool_extractor(content: 
str) -> Union[str, list["FunctionCall"]]: regex = re.compile(r"<tool_call>(.+?)</tool_call>(?=\s*<tool_call>|\s*$)", re.DOTALL) tool_match: list[str] = re.findall(regex, content) if not tool_match: return content results = [] for tool in tool_match: try: tool = json.loads(tool.strip()) except json.JSONDecodeError: return content if "name" not in tool or "arguments" not in tool: return content results.append(FunctionCall(tool["name"], json.dumps(tool["arguments"], ensure_ascii=False))) return results class GLM4MOEToolUtils(QwenToolUtils): r"""GLM-4-MOE tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" for tool in tools: wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool} tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False) return GLM4_MOE_TOOL_PROMPT.format(tool_text=tool_text) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: function_json = [ {"func_name": name, "func_key_values": json.loads(arguments)} for name, arguments in functions ] function_texts = [] for func in function_json: prompt = "\n<tool_call>" + func["func_name"] for key, value in func["func_key_values"].items(): prompt += "\n<arg_key>" + key + "</arg_key>" if not isinstance(value, str): value = json.dumps(value, ensure_ascii=False) prompt += "\n<arg_value>" + value + "</arg_value>" function_texts.append(prompt) return "\n".join(function_texts) class SeedToolUtils(ToolUtils): r"""Seed tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: return SEED_TOOL_PROMPT.format(tool_text="\n" + json.dumps(tools, ensure_ascii=False)) @override @staticmethod def function_formatter(functions: list["FunctionCall"]) -> str: function_json = [ {"func_name": name, "func_key_values": json.loads(arguments)} for name, arguments in functions ] function_texts = [] for func in function_json: prompt = 
"\n<seed:tool_call>\n<function=" + func["func_name"] for key, value in func["func_key_values"].items(): prompt += "\n<parameter=" + key + ">" if not isinstance(value, str): value = json.dumps(value, ensure_ascii=False) prompt += value + "</parameter>" prompt += "\n</function>\n</seed:tool_call>" function_texts.append(prompt) return "\n".join(function_texts) @override @staticmethod def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]: results = [] regex = re.compile( r"<seed:tool_call>\s*<function=\s*([^\s<]+)\s*(.*?)\s*</function>\s*</seed:tool_call>", re.DOTALL ) for func_name, params_block in re.findall(regex, content): args_dict = {} param_pattern = re.compile(r"<parameter=(.*?)>(.*?)</parameter>", re.DOTALL) for key, raw_value in re.findall(param_pattern, params_block.strip()): value = raw_value.strip() try: parsed_value = json.loads(value) except json.JSONDecodeError: parsed_value = raw_value args_dict[key] = parsed_value results.append(FunctionCall(func_name.strip(), json.dumps(args_dict, ensure_ascii=False))) return results class LingToolUtils(QwenToolUtils): r"""Ling v2 tool using template.""" @override @staticmethod def tool_formatter(tools: list[dict[str, Any]]) -> str: tool_text = "" for tool in tools: wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool} tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False) return LING_TOOL_PROMPT.format(tool_text=tool_text) + "\n" + "detailed thinking off" TOOLS = { "default": DefaultToolUtils(), "glm4": GLM4ToolUtils(), "llama3": Llama3ToolUtils(), "minimax1": MiniMaxM1ToolUtils(), "minimax2": MiniMaxM2ToolUtils(), "mistral": MistralToolUtils(), "qwen": QwenToolUtils(), "glm4_moe": GLM4MOEToolUtils(), "seed_oss": SeedToolUtils(), "ling": LingToolUtils(), } def get_tool_utils(name: str) -> "ToolUtils": tool_utils = TOOLS.get(name, None) if tool_utils is None: raise ValueError(f"Tool utils `{name}` not found.") return tool_utils
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/converter.py
src/llamafactory/data/converter.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from abc import abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Union from ..extras import logging from .data_utils import Role if TYPE_CHECKING: from datasets import Dataset, IterableDataset from transformers import Seq2SeqTrainingArguments from ..hparams import DataArguments from .mm_plugin import AudioInput, ImageInput, VideoInput from .parser import DatasetAttr MediaType = Union[ImageInput, VideoInput, AudioInput] logger = logging.get_logger(__name__) @dataclass class DatasetConverter: dataset_attr: "DatasetAttr" data_args: "DataArguments" def _find_medias(self, medias: Union["MediaType", list["MediaType"], None]) -> list["MediaType"] | None: r"""Optionally concatenate media path to media dir when loading from local disk.""" if medias is None: return None elif not isinstance(medias, list): medias = [medias] elif len(medias) == 0: return None else: medias = medias[:] if self.dataset_attr.load_from in ["script", "file"]: if isinstance(medias[0], str): for i in range(len(medias)): media_path = os.path.join(self.data_args.media_dir, medias[i]) if os.path.isfile(media_path): medias[i] = media_path else: logger.warning_rank0_once( f"Media {medias[i]} does not exist in `media_dir`. Use original path." 
) elif isinstance(medias[0], list): # for processed video frames # medias is a list of lists, e.g., [[frame1.jpg, frame2.jpg], [frame3.jpg, frame4.jpg]] for i in range(len(medias)): for j in range(len(medias[i])): media_path = os.path.join(self.data_args.media_dir, medias[i][j]) if os.path.isfile(media_path): medias[i][j] = media_path else: logger.warning_rank0_once( f"Media {medias[i][j]} does not exist in `media_dir`. Use original path." ) return medias @abstractmethod def __call__(self, example: dict[str, Any]) -> dict[str, Any]: r"""Convert a single example in the dataset to the standard format.""" ... @dataclass class AlpacaDatasetConverter(DatasetConverter): def __call__(self, example: dict[str, Any]) -> dict[str, Any]: prompt = [] if self.dataset_attr.history and isinstance(example[self.dataset_attr.history], list): for old_prompt, old_response in example[self.dataset_attr.history]: prompt.append({"role": Role.USER.value, "content": old_prompt}) prompt.append({"role": Role.ASSISTANT.value, "content": old_response}) query = [] if self.dataset_attr.prompt and example[self.dataset_attr.prompt]: query.append(example[self.dataset_attr.prompt]) if self.dataset_attr.query and example[self.dataset_attr.query]: query.append(example[self.dataset_attr.query]) prompt.append({"role": Role.USER.value, "content": "\n".join(query)}) # "prompt\nquery" if self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool): # kto example response = [{"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.response]}] if example[self.dataset_attr.kto_tag]: response = response + [{"role": Role.ASSISTANT.value, "content": ""}] else: response = [{"role": Role.ASSISTANT.value, "content": ""}] + response elif ( self.dataset_attr.ranking and isinstance(example[self.dataset_attr.chosen], str) and isinstance(example[self.dataset_attr.rejected], str) ): # pairwise example response = [ {"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.chosen]}, 
{"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.rejected]}, ] elif self.dataset_attr.response and isinstance(example[self.dataset_attr.response], str): # normal example response = [{"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.response]}] else: # unsupervised response = [] output = { "_prompt": prompt, "_response": response, "_system": example[self.dataset_attr.system] if self.dataset_attr.system else "", "_tools": example[self.dataset_attr.tools] if self.dataset_attr.tools else "", "_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None, "_videos": self._find_medias(example[self.dataset_attr.videos]) if self.dataset_attr.videos else None, "_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None, } return output @dataclass class SharegptDatasetConverter(DatasetConverter): def __call__(self, example: dict[str, Any]) -> dict[str, Any]: tag_mapping = { self.dataset_attr.user_tag: Role.USER.value, self.dataset_attr.assistant_tag: Role.ASSISTANT.value, self.dataset_attr.observation_tag: Role.OBSERVATION.value, self.dataset_attr.function_tag: Role.FUNCTION.value, self.dataset_attr.system_tag: Role.SYSTEM.value, } odd_tags = (self.dataset_attr.user_tag, self.dataset_attr.observation_tag) even_tags = (self.dataset_attr.assistant_tag, self.dataset_attr.function_tag) accept_tags = (odd_tags, even_tags) messages = example[self.dataset_attr.messages] if ( self.dataset_attr.system_tag and len(messages) != 0 and messages[0][self.dataset_attr.role_tag] == self.dataset_attr.system_tag ): system = messages[0][self.dataset_attr.content_tag] messages = messages[1:] else: system = example[self.dataset_attr.system] if self.dataset_attr.system else "" aligned_messages = [] broken_data = False for turn_idx, message in enumerate(messages): if message[self.dataset_attr.role_tag] not in accept_tags[turn_idx % 2]: logger.warning_rank0(f"Invalid role tag in 
{messages}.") broken_data = True break aligned_messages.append( { "role": tag_mapping[message[self.dataset_attr.role_tag]], "content": message[self.dataset_attr.content_tag], } ) if (not self.dataset_attr.ranking and len(aligned_messages) % 2 != 0) or ( self.dataset_attr.ranking and len(aligned_messages) % 2 == 0 ): logger.warning_rank0(f"Invalid message count in {messages}.") broken_data = True if broken_data: logger.warning_rank0("Skipping this abnormal example.") prompt, response = [], [] elif self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool): # kto example prompt = aligned_messages[:-1] response = aligned_messages[-1:] if example[self.dataset_attr.kto_tag]: response = response + [{"role": Role.ASSISTANT.value, "content": ""}] else: response = [{"role": Role.ASSISTANT.value, "content": ""}] + response elif ( self.dataset_attr.ranking and isinstance(example[self.dataset_attr.chosen], dict) and isinstance(example[self.dataset_attr.rejected], dict) ): # pairwise example chosen = example[self.dataset_attr.chosen] rejected = example[self.dataset_attr.rejected] if ( chosen[self.dataset_attr.role_tag] not in accept_tags[-1] or rejected[self.dataset_attr.role_tag] not in accept_tags[-1] ): logger.warning_rank0(f"Invalid role tag in {[chosen, rejected]}.") broken_data = True prompt = aligned_messages response = [ { "role": tag_mapping[chosen[self.dataset_attr.role_tag]], "content": chosen[self.dataset_attr.content_tag], }, { "role": tag_mapping[rejected[self.dataset_attr.role_tag]], "content": rejected[self.dataset_attr.content_tag], }, ] else: # normal example prompt = aligned_messages[:-1] response = aligned_messages[-1:] output = { "_prompt": prompt, "_response": response, "_system": system, "_tools": example[self.dataset_attr.tools] if self.dataset_attr.tools else "", "_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None, "_videos": self._find_medias(example[self.dataset_attr.videos]) if 
self.dataset_attr.videos else None, "_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None, } return output @dataclass class OpenAIDatasetConverter(DatasetConverter): def __call__(self, example: dict[str, Any]) -> dict[str, Any]: tag_mapping = { self.dataset_attr.user_tag: Role.USER.value, self.dataset_attr.assistant_tag: Role.ASSISTANT.value, self.dataset_attr.observation_tag: Role.OBSERVATION.value, self.dataset_attr.function_tag: Role.FUNCTION.value, self.dataset_attr.system_tag: Role.SYSTEM.value, } messages = example[self.dataset_attr.messages] if ( self.dataset_attr.system_tag and len(messages) != 0 and messages[0][self.dataset_attr.role_tag] == self.dataset_attr.system_tag ): system = messages[0][self.dataset_attr.content_tag] messages = messages[1:] else: system = example.get(self.dataset_attr.system, "") if self.dataset_attr.system else "" aligned_messages = [] tool_responses = [] broken_data = False for turn_idx, message in enumerate(messages): role = message[self.dataset_attr.role_tag] content = message[self.dataset_attr.content_tag] if role in [self.dataset_attr.assistant_tag, self.dataset_attr.function_tag]: if "tool_calls" in message and len(message["tool_calls"]) > 0: tool_calls_list = [tool["function"] for tool in message["tool_calls"]] content = json.dumps(tool_calls_list, ensure_ascii=False) role = self.dataset_attr.function_tag if role == self.dataset_attr.observation_tag: tool_responses.append(content) continue elif len(tool_responses) > 0: _content = "\n</tool_response>\n<tool_response>\n".join(tool_responses) aligned_messages.append( { "role": Role.OBSERVATION.value, "content": _content, } ) tool_responses = [] aligned_messages.append( { "role": tag_mapping[role], "content": content, } ) odd_tags = (Role.USER.value, Role.OBSERVATION.value) even_tags = (Role.ASSISTANT.value, Role.FUNCTION.value) accept_tags = (odd_tags, even_tags) for turn_idx, message in enumerate(aligned_messages): if 
message["role"] not in accept_tags[turn_idx % 2]: logger.warning_rank0(f"Invalid role tag in {messages}.") broken_data = True break if (not self.dataset_attr.ranking and len(aligned_messages) % 2 != 0) or ( self.dataset_attr.ranking and len(aligned_messages) % 2 == 0 ): logger.warning_rank0(f"Invalid message count in {messages}.") broken_data = True if broken_data: logger.warning_rank0("Skipping this abnormal example.") prompt, response = [], [] elif self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool): # kto example prompt = aligned_messages[:-1] response = aligned_messages[-1:] if example[self.dataset_attr.kto_tag]: response = response + [{"role": Role.ASSISTANT.value, "content": ""}] else: response = [{"role": Role.ASSISTANT.value, "content": ""}] + response elif ( self.dataset_attr.ranking and isinstance(example[self.dataset_attr.chosen], dict) and isinstance(example[self.dataset_attr.rejected], dict) ): # pairwise example chosen = example[self.dataset_attr.chosen] rejected = example[self.dataset_attr.rejected] if ( chosen[self.dataset_attr.role_tag] not in accept_tags[-1] or rejected[self.dataset_attr.role_tag] not in accept_tags[-1] ): logger.warning_rank0(f"Invalid role tag in {[chosen, rejected]}.") broken_data = True prompt = aligned_messages response = [ { "role": tag_mapping[chosen[self.dataset_attr.role_tag]], "content": chosen[self.dataset_attr.content_tag], }, { "role": tag_mapping[rejected[self.dataset_attr.role_tag]], "content": rejected[self.dataset_attr.content_tag], }, ] else: # normal example prompt = aligned_messages[:-1] response = aligned_messages[-1:] tools = example.get(self.dataset_attr.tools, "") if self.dataset_attr.tools else "" if isinstance(tools, dict) or isinstance(tools, list): tools = json.dumps(tools, ensure_ascii=False) short_system_prompt = "detailed thinking off" if not system: if not tools: system = short_system_prompt else: pass else: if not tools: if "detailed thinking on" in system or 
"detailed thinking off" in system: pass else: system += "\n" + short_system_prompt else: system += "\n" output = { "_prompt": prompt, "_response": response, "_system": system, "_tools": tools, "_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None, "_videos": self._find_medias(example[self.dataset_attr.videos]) if self.dataset_attr.videos else None, "_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None, } return output DATASET_CONVERTERS = { "alpaca": AlpacaDatasetConverter, "sharegpt": SharegptDatasetConverter, "openai": OpenAIDatasetConverter, } def register_dataset_converter(name: str, dataset_converter: type["DatasetConverter"]) -> None: r"""Register a new dataset converter.""" if name in DATASET_CONVERTERS: raise ValueError(f"Dataset converter {name} already exists.") DATASET_CONVERTERS[name] = dataset_converter def get_dataset_converter(name: str, dataset_attr: "DatasetAttr", data_args: "DataArguments") -> "DatasetConverter": r"""Get a dataset converter.""" if name not in DATASET_CONVERTERS: raise ValueError(f"Dataset converter {name} not found.") return DATASET_CONVERTERS[name](dataset_attr, data_args) def align_dataset( dataset: Union["Dataset", "IterableDataset"], dataset_attr: "DatasetAttr", data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments", ) -> Union["Dataset", "IterableDataset"]: r"""Align the dataset to a specific format. Aligned dataset: _prompt: [{"role": "user", "content": "..."}] * (2T - 1) _response: [{"role": "assistant", "content": "..."}] * N (N > 1 for ranking dataset) _system: "..." _tools: "..." 
_images: [] _videos: [] _audios: [] """ column_names = list(next(iter(dataset)).keys()) kwargs = {} if not data_args.streaming: kwargs = dict( num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0), desc="Converting format of dataset", ) dataset_converter = get_dataset_converter(dataset_attr.formatting, dataset_attr, data_args) return dataset.map( dataset_converter, batched=False, remove_columns=column_names, **kwargs, )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/formatter.py
src/llamafactory/data/formatter.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing_extensions import override from .data_utils import SLOTS from .tool_utils import FunctionCall, get_tool_utils @dataclass class Formatter(ABC): slots: SLOTS = field(default_factory=list) tool_format: str | None = None @abstractmethod def apply(self, **kwargs) -> SLOTS: r"""Forms a list of slots according to the inputs to encode.""" ... def extract(self, content: str) -> str | list["FunctionCall"]: r"""Extract a list of tuples from the response message if using tools. Each tuple consists of function name and function arguments. 
""" raise NotImplementedError @dataclass class EmptyFormatter(Formatter): def __post_init__(self): has_placeholder = False for slot in filter(lambda s: isinstance(s, str), self.slots): if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot): has_placeholder = True if has_placeholder: raise ValueError("Empty formatter should not contain any placeholder.") @override def apply(self, **kwargs) -> SLOTS: return self.slots @dataclass class StringFormatter(Formatter): def __post_init__(self): has_placeholder = False for slot in filter(lambda s: isinstance(s, str), self.slots): if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot): has_placeholder = True if not has_placeholder: raise ValueError("A placeholder is required in the string formatter.") @override def apply(self, **kwargs) -> SLOTS: elements = [] for slot in self.slots: if isinstance(slot, str): for name, value in kwargs.items(): if not isinstance(value, str): raise RuntimeError(f"Expected a string, got {value}") slot = slot.replace("{{" + name + "}}", value, 1) elements.append(slot) elif isinstance(slot, (dict, set)): elements.append(slot) else: raise RuntimeError(f"Input must be string, set[str] or dict[str, str], got {type(slot)}.") return elements @dataclass class FunctionFormatter(StringFormatter): def __post_init__(self): super().__post_init__() self.tool_utils = get_tool_utils(self.tool_format) @override def apply(self, **kwargs) -> SLOTS: content: str = kwargs.pop("content") thought_words = kwargs.pop("thought_words", None) tool_call_words = kwargs.pop("tool_call_words", None) def _parse_functions(json_content: str) -> list["FunctionCall"]: try: tool_calls = json.loads(json_content) if not isinstance(tool_calls, list): # parallel function call tool_calls = [tool_calls] return [FunctionCall(tc["name"], json.dumps(tc["arguments"], ensure_ascii=False)) for tc in tool_calls] except json.JSONDecodeError: raise RuntimeError(f"Invalid JSON format in function message: {str([content])}.") tool_call_match = None if 
tool_call_words and len(tool_call_words) == 2: tool_call_regex = re.compile( rf"{re.escape(tool_call_words[0])}(.*?){re.escape(tool_call_words[1])}", re.DOTALL ) tool_call_match = re.search(tool_call_regex, content) if tool_call_match is None: thought_match = None if thought_words and len(thought_words) == 2: regex = re.compile(rf"{re.escape(thought_words[0])}(.*?){re.escape(thought_words[1])}", re.DOTALL) thought_match = re.search(regex, content) if thought_match: json_part = content.replace(thought_match.group(0), "") else: json_part = content functions = _parse_functions(json_part) function_str = self.tool_utils.function_formatter(functions) if thought_match: function_str = thought_match.group(0) + function_str else: thought_content = content.replace(tool_call_match.group(0), "") functions = _parse_functions(tool_call_match.group(1)) function_str = self.tool_utils.function_formatter(functions) function_str = thought_content + function_str return super().apply(content=function_str) @dataclass class ToolFormatter(Formatter): def __post_init__(self): self.tool_utils = get_tool_utils(self.tool_format) @override def apply(self, **kwargs) -> SLOTS: content = kwargs.pop("content") try: tools = json.loads(content) return [self.tool_utils.tool_formatter(tools) if len(tools) != 0 else ""] except json.JSONDecodeError: raise RuntimeError(f"Invalid JSON format in tool description: {str([content])}.") # flat string @override def extract(self, content: str) -> str | list["FunctionCall"]: return self.tool_utils.tool_extractor(content)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/__init__.py
src/llamafactory/data/__init__.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .collator import ( KTODataCollatorWithPadding, MultiModalDataCollatorForSeq2Seq, PairwiseDataCollatorWithPadding, SFTDataCollatorWith4DAttentionMask, ) from .data_utils import Role, split_dataset from .loader import get_dataset from .template import TEMPLATES, Template, get_template_and_fix_tokenizer __all__ = [ "TEMPLATES", "KTODataCollatorWithPadding", "MultiModalDataCollatorForSeq2Seq", "PairwiseDataCollatorWithPadding", "Role", "SFTDataCollatorWith4DAttentionMask", "Template", "get_dataset", "get_template_and_fix_tokenizer", "split_dataset", ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/collator.py
src/llamafactory/data/collator.py
# Copyright 2025 OpenAccess AI Collective and the LlamaFactory team. # # This code is inspired by the OpenAccess AI Collective's axolotl library. # https://github.com/OpenAccess-AI-Collective/axolotl/blob/main/src/axolotl/monkeypatch/utils.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Literal, Optional import numpy as np import torch import torch.nn.functional as F from peft import PeftModel from transformers import DataCollatorForSeq2Seq from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER from ..extras.packages import is_pillow_available if is_pillow_available(): from PIL import Image if TYPE_CHECKING: from transformers import ProcessorMixin from .template import Template def prepare_4d_attention_mask(attention_mask_with_indices: "torch.Tensor", dtype: "torch.dtype") -> "torch.Tensor": r"""Expand 2d attention mask to 4d attention mask. Expand the attention mask with indices from (batch_size, seq_len) to (batch_size, 1, seq_len, seq_len), handle packed sequences and transforms the mask to lower triangular form to prevent future peeking. e.g. ```python # input [[1, 1, 2, 2, 2, 0]] # output [ [ [ [o, x, x, x, x, x], [o, o, x, x, x, x], [x, x, o, x, x, x], [x, x, o, o, x, x], [x, x, o, o, o, x], [x, x, x, x, x, x], ] ] ] ``` where `o` equals to `0.0`, `x` equals to `min_dtype`. 
""" _, seq_len = attention_mask_with_indices.size() min_dtype = torch.finfo(dtype).min zero_tensor = torch.tensor(0, dtype=dtype) # Create a non-padding mask. non_padding_mask = (attention_mask_with_indices != 0).unsqueeze(1).unsqueeze(2) # Create indices for comparison. indices = attention_mask_with_indices.unsqueeze(1).unsqueeze(2) # [bsz, 1, 1, seq_len] indices_t = attention_mask_with_indices.unsqueeze(1).unsqueeze(3) # [bsz, 1, seq_len, 1] # Create a lower triangular mask. tril_mask = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool)) attention_mask_4d = (indices == indices_t) & non_padding_mask & tril_mask # Invert the attention mask. attention_mask_4d = torch.where(attention_mask_4d, zero_tensor, min_dtype) return attention_mask_4d @dataclass class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq): r"""Data collator that supports VLMs. Features should contain input_ids, attention_mask, labels, and optionally contain images, videos and audios. """ template: Optional["Template"] = None processor: Optional["ProcessorMixin"] = None def __post_init__(self): if self.template is None: raise ValueError("Template is required for MultiModalDataCollator.") if isinstance(self.model, PeftModel): self.model = self.model.base_model.model if self.model is not None and hasattr(self.model, "get_rope_index"): # for qwen2vl mrope self.get_rope_func = self.model.get_rope_index # transformers < 4.52.0 or qwen2.5 omni elif self.model is not None and hasattr(self.model, "model") and hasattr(self.model.model, "get_rope_index"): self.get_rope_func = self.model.model.get_rope_index # transformers >= 4.52.0 else: self.get_rope_func = None def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]: batch_images, batch_videos, batch_audios = [], [], [] batch_imglens, batch_vidlens, batch_audlens, batch_input_ids = [], [], [], [] for feature in features: images = feature.pop("images", None) or [] videos = feature.pop("videos", None) or [] audios = 
feature.pop("audios", None) or [] batch_images.extend(images) batch_videos.extend(videos) batch_audios.extend(audios) batch_imglens.append(len(images)) batch_vidlens.append(len(videos)) batch_audlens.append(len(audios)) batch_input_ids.append(feature["input_ids"]) fake_input_ids = [] if ( self.template.mm_plugin.image_token is not None and sum(batch_imglens) == 0 and sum(batch_vidlens) == 0 ): # avoid process hanging in zero3/fsdp case fake_messages = [{"role": "user", "content": IMAGE_PLACEHOLDER}] fake_images = [Image.new("RGB", (64, 64), (255, 255, 255))] fake_messages = self.template.mm_plugin.process_messages( fake_messages, fake_images, [], [], self.processor ) _fake_input_ids = self.tokenizer.encode(fake_messages[0]["content"], add_special_tokens=False) _fake_input_ids, _ = self.template.mm_plugin.process_token_ids( _fake_input_ids, None, fake_images, [], [], self.tokenizer, self.processor ) fake_input_ids.extend(_fake_input_ids) batch_images = fake_images batch_imglens[0] = 1 if ( self.template.mm_plugin.audio_token is not None and sum(batch_audlens) == 0 ): # avoid process hanging in zero3/fsdp case fake_messages = [{"role": "user", "content": AUDIO_PLACEHOLDER}] fake_audios = [np.zeros(1600)] fake_messages = self.template.mm_plugin.process_messages( fake_messages, [], [], fake_audios, self.processor ) _fake_input_ids = self.tokenizer.encode(fake_messages[0]["content"], add_special_tokens=False) _fake_input_ids, _ = self.template.mm_plugin.process_token_ids( _fake_input_ids, None, [], [], fake_audios, self.tokenizer, self.processor ) fake_input_ids.extend(_fake_input_ids) batch_audios = fake_audios batch_audlens[0] = 1 if len(fake_input_ids) != 0: if self.tokenizer.padding_side == "right": features[0]["input_ids"] = features[0]["input_ids"] + fake_input_ids features[0]["attention_mask"] = features[0]["attention_mask"] + [0] * len(fake_input_ids) features[0]["labels"] = features[0]["labels"] + [IGNORE_INDEX] * len(fake_input_ids) else: 
features[0]["input_ids"] = fake_input_ids + features[0]["input_ids"] features[0]["attention_mask"] = [0] * len(fake_input_ids) + features[0]["attention_mask"] features[0]["labels"] = [IGNORE_INDEX] * len(fake_input_ids) + features[0]["labels"] batch_input_ids[0] = features[0]["input_ids"] mm_inputs = self.template.mm_plugin.get_mm_inputs( batch_images, batch_videos, batch_audios, batch_imglens, batch_vidlens, batch_audlens, batch_input_ids, self.processor, ) if "token_type_ids" in mm_inputs: token_type_ids = mm_inputs.pop("token_type_ids") for i, feature in enumerate(features): feature["token_type_ids"] = token_type_ids[i] features: dict[str, torch.Tensor] = super().__call__(features) if self.get_rope_func is not None: rope_index_kwargs = { "input_ids": features["input_ids"], "image_grid_thw": mm_inputs.get("image_grid_thw"), "video_grid_thw": mm_inputs.get("video_grid_thw"), "attention_mask": (features["attention_mask"] >= 1).float(), } if "second_per_grid_ts" in mm_inputs: # for qwen2vl rope_index_kwargs["second_per_grid_ts"] = mm_inputs.get("second_per_grid_ts") elif "video_second_per_grid" in mm_inputs: # for qwen2.5 omni rope_index_kwargs["second_per_grids"] = mm_inputs.get("video_second_per_grid") if getattr(self.model.config, "model_type", None) in ["qwen2_5_omni_thinker", "qwen3_omni_moe_thinker"]: rope_index_kwargs["use_audio_in_video"] = getattr(self.processor, "use_audio_in_video", False) feature_attention_mask = mm_inputs.get("feature_attention_mask", None) if feature_attention_mask is not None: # FIXME: need to get video image lengths audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) rope_index_kwargs["audio_seqlens"] = audio_feature_lengths # prepare for input features["position_ids"], rope_deltas = self.get_rope_func(**rope_index_kwargs) features["rope_deltas"] = rope_deltas - (1 - rope_index_kwargs["attention_mask"]).sum( dim=-1 ).unsqueeze(-1) else: # for qwen vl features["position_ids"], features["rope_deltas"] = 
self.get_rope_func(**rope_index_kwargs) if ( self.model is not None and getattr(self.model.config, "model_type", None) in [ "glm4v", "Keye", "qwen2_vl", "qwen2_5_vl", "qwen2_5_omni_thinker", "qwen3_omni_moe_thinker", "qwen3_vl", "qwen3_vl_moe", ] and ("position_ids" not in features or features["position_ids"].dim() != 3) ): raise ValueError(f"{self.model.config.model_type} requires 3D position ids for mrope.") if "cross_attention_mask" in mm_inputs: # for mllama inputs when pad_to_multiple_of is enabled cross_attention_mask = mm_inputs.pop("cross_attention_mask") seq_len = features["input_ids"].size(1) orig_len = cross_attention_mask.size(1) mm_inputs["cross_attention_mask"] = F.pad(cross_attention_mask, (0, 0, 0, 0, 0, seq_len - orig_len)) features.update(mm_inputs) if "image_bound" in features: # for minicpmv inputs bsz, seq_length = features["input_ids"].shape features["position_ids"] = torch.arange(seq_length).long().repeat(bsz, 1) return {"data": features, "input_ids": features["input_ids"], "labels": features["labels"]} return features @dataclass class SFTDataCollatorWith4DAttentionMask(MultiModalDataCollatorForSeq2Seq): r"""Data collator for 4d attention mask.""" block_diag_attn: bool = False attn_implementation: Literal["eager", "sdpa", "flash_attention_2"] = "eager" compute_dtype: "torch.dtype" = torch.float32 def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]: features = super().__call__(features) if self.block_diag_attn and self.attn_implementation != "flash_attention_2": features["attention_mask"] = prepare_4d_attention_mask(features["attention_mask"], self.compute_dtype) for key, value in features.items(): # cast data dtype for paligemma if torch.is_tensor(value) and torch.is_floating_point(value): features[key] = value.to(self.compute_dtype) return features @dataclass class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq): r"""Data collator for pairwise data.""" def __call__(self, features: list[dict[str, 
Any]]) -> dict[str, "torch.Tensor"]: r"""Pad batched data to the longest sequence in the batch. We generate 2 * n examples where the first n examples represent chosen examples and the last n examples represent rejected examples. """ concatenated_features = [] for key in ("chosen", "rejected"): for feature in features: target_feature = { "input_ids": feature[f"{key}_input_ids"], "attention_mask": feature[f"{key}_attention_mask"], "labels": feature[f"{key}_labels"], "images": feature["images"], "videos": feature["videos"], "audios": feature["audios"], } concatenated_features.append(target_feature) return super().__call__(concatenated_features) @dataclass class KTODataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq): r"""Data collator for KTO data.""" def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]: target_features = [] kl_features = [] kto_tags = [] for feature in features: target_feature = { "input_ids": feature["input_ids"], "attention_mask": feature["attention_mask"], "labels": feature["labels"], "images": feature["images"], "videos": feature["videos"], "audios": feature["audios"], } kl_feature = { "input_ids": feature["kl_input_ids"], "attention_mask": feature["kl_attention_mask"], "labels": feature["kl_labels"], "images": feature["images"], "videos": feature["videos"], "audios": feature["audios"], } target_features.append(target_feature) kl_features.append(kl_feature) kto_tags.append(feature["kto_tags"]) batch = super().__call__(target_features) kl_batch = super().__call__(kl_features) batch["kl_input_ids"] = kl_batch["input_ids"] batch["kl_attention_mask"] = kl_batch["attention_mask"] batch["kl_labels"] = kl_batch["labels"] if "cross_attention_mask" in kl_batch: # for mllama inputs batch["kl_cross_attention_mask"] = kl_batch["cross_attention_mask"] if "token_type_ids" in kl_batch: batch["kl_token_type_ids"] = kl_batch["token_type_ids"] batch["kto_tags"] = torch.tensor(kto_tags) return batch
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/pretrain.py
src/llamafactory/data/processor/pretrain.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from itertools import chain from typing import Any from .processor_utils import DatasetProcessor @dataclass class PretrainDatasetProcessor(DatasetProcessor): def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]: # build grouped texts with format `X1 X2 X3 ...` if packing is enabled eos_token = "<|end_of_text|>" if self.data_args.template == "llama3" else self.tokenizer.eos_token text_examples = [messages[0]["content"] + eos_token for messages in examples["_prompt"]] if not self.data_args.packing: if getattr(self.tokenizer, "add_bos_token", False): text_examples = [self.tokenizer.bos_token + example for example in text_examples] result = self.tokenizer( text_examples, add_special_tokens=False, truncation=True, max_length=self.data_args.cutoff_len ) else: tokenized_examples = self.tokenizer(text_examples, add_special_tokens=False) concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()} total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]]) block_size = self.data_args.cutoff_len total_length = (total_length // block_size) * block_size result = { k: [t[i : i + block_size] 
for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } if getattr(self.tokenizer, "add_bos_token", False): for i in range(len(result["input_ids"])): result["input_ids"][i][0] = self.tokenizer.bos_token_id return result def print_data_example(self, example: dict[str, list[int]]) -> None: print("input_ids:\n{}".format(example["input_ids"])) print("inputs:\n{}".format(self.tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/feedback.py
src/llamafactory/data/processor/feedback.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from typing import TYPE_CHECKING, Any, Optional from ...extras import logging from ...extras.constants import IGNORE_INDEX from .processor_utils import DatasetProcessor, infer_seqlen if TYPE_CHECKING: from ..mm_plugin import AudioInput, ImageInput, VideoInput logger = logging.get_logger(__name__) class FeedbackDatasetProcessor(DatasetProcessor): def _encode_data_example( self, prompt: list[dict[str, str]], response: list[dict[str, str]], kl_response: list[dict[str, str]], system: Optional[str], tools: Optional[str], images: list["ImageInput"], videos: list["VideoInput"], audios: list["AudioInput"], ) -> tuple[list[int], list[int], list[int], list[int], bool]: if response[0]["content"]: # desired example kto_tag = True messages = prompt + [response[0]] else: # undesired example kto_tag = False messages = prompt + [response[1]] if kl_response[0]["content"]: kl_messages = prompt + [kl_response[0]] else: kl_messages = prompt + [kl_response[1]] messages = self.template.mm_plugin.process_messages(messages, images, videos, audios, self.processor) kl_messages = self.template.mm_plugin.process_messages(kl_messages, images, videos, audios, self.processor) prompt_ids, response_ids = self.template.encode_oneturn(self.tokenizer, messages, system, tools) kl_prompt_ids, kl_response_ids = self.template.encode_oneturn(self.tokenizer, kl_messages, system, tools) if 
self.template.efficient_eos: response_ids += [self.tokenizer.eos_token_id] kl_response_ids += [self.tokenizer.eos_token_id] prompt_ids, _ = self.template.mm_plugin.process_token_ids( prompt_ids, None, images, videos, audios, self.tokenizer, self.processor ) kl_prompt_ids, _ = self.template.mm_plugin.process_token_ids( kl_prompt_ids, None, images, videos, audios, self.tokenizer, self.processor ) source_len, target_len = infer_seqlen(len(prompt_ids), len(response_ids), self.data_args.cutoff_len) prompt_ids = prompt_ids[:source_len] response_ids = response_ids[:target_len] kl_source_len, kl_target_len = infer_seqlen( len(kl_prompt_ids), len(kl_response_ids), self.data_args.cutoff_len ) kl_prompt_ids = kl_prompt_ids[:kl_source_len] kl_response_ids = kl_response_ids[:kl_target_len] input_ids = prompt_ids + response_ids labels = [IGNORE_INDEX] * source_len + response_ids kl_input_ids = kl_prompt_ids + kl_response_ids kl_labels = [IGNORE_INDEX] * kl_source_len + kl_response_ids return input_ids, labels, kl_input_ids, kl_labels, kto_tag def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]: # Creates mismatched pairs of prompts and completions for the KL dataset by adding a +1 offset to the order of completions. 
kl_response = [examples["_response"][-1]] + examples["_response"][:-1] model_inputs = defaultdict(list) for i in range(len(examples["_prompt"])): if len(examples["_prompt"][i]) % 2 != 1 or len(examples["_response"][i]) < 2: logger.warning_rank0( "Dropped invalid example: {}".format(examples["_prompt"][i] + examples["_response"][i]) ) continue input_ids, labels, kl_input_ids, kl_labels, kto_tag = self._encode_data_example( prompt=examples["_prompt"][i], response=examples["_response"][i], kl_response=kl_response[i], system=examples["_system"][i], tools=examples["_tools"][i], images=examples["_images"][i] or [], videos=examples["_videos"][i] or [], audios=examples["_audios"][i] or [], ) model_inputs["input_ids"].append(input_ids) model_inputs["attention_mask"].append([1] * len(input_ids)) model_inputs["labels"].append(labels) model_inputs["kl_input_ids"].append(kl_input_ids) model_inputs["kl_attention_mask"].append([1] * len(kl_input_ids)) model_inputs["kl_labels"].append(kl_labels) model_inputs["kto_tags"].append(kto_tag) model_inputs["images"].append(examples["_images"][i]) model_inputs["videos"].append(examples["_videos"][i]) model_inputs["audios"].append(examples["_audios"][i]) desirable_num = sum([1 for tag in model_inputs["kto_tags"] if tag]) undesirable_num = len(model_inputs["kto_tags"]) - desirable_num if desirable_num == 0 or undesirable_num == 0: logger.warning_rank0("Your dataset only has one preference type.") return model_inputs def print_data_example(self, example: dict[str, list[int]]) -> None: valid_labels = list(filter(lambda x: x != IGNORE_INDEX, example["labels"])) print("input_ids:\n{}".format(example["input_ids"])) print("inputs:\n{}".format(self.tokenizer.decode(example["input_ids"], skip_special_tokens=False))) print("label_ids:\n{}".format(example["labels"])) print(f"labels:\n{self.tokenizer.decode(valid_labels, skip_special_tokens=False)}")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/supervised.py
src/llamafactory/data/processor/supervised.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Optional

from ...extras import logging
from ...extras.constants import IGNORE_INDEX
from .processor_utils import DatasetProcessor, greedy_knapsack, infer_seqlen


if TYPE_CHECKING:
    from ..mm_plugin import AudioInput, ImageInput, VideoInput


logger = logging.get_logger(__name__)


@dataclass
class SupervisedDatasetProcessor(DatasetProcessor):
    """Tokenizes prompt/response conversations for supervised fine-tuning (one sequence per example)."""

    def _encode_data_example(
        self,
        prompt: list[dict[str, str]],
        response: list[dict[str, str]],
        system: Optional[str],
        tools: Optional[str],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
    ) -> tuple[list[int], list[int]]:
        """Encode a single (possibly multiturn, possibly multimodal) conversation.

        Returns ``(input_ids, labels)`` where prompt tokens in ``labels`` are replaced with
        ``IGNORE_INDEX`` unless ``data_args.train_on_prompt`` is set. Turn pairs are truncated
        jointly via ``infer_seqlen`` so the total stays within ``data_args.cutoff_len``.
        """
        # Let the multimodal plugin rewrite messages and emit any leading media token ids.
        messages = self.template.mm_plugin.process_messages(prompt + response, images, videos, audios, self.processor)
        input_ids, labels = self.template.mm_plugin.process_token_ids(
            [], [], images, videos, audios, self.tokenizer, self.processor
        )
        encoded_pairs = self.template.encode_multiturn(self.tokenizer, messages, system, tools)
        # Reserve one slot for the trailing EOS when the template appends it itself.
        total_length = len(input_ids) + (1 if self.template.efficient_eos else 0)
        if self.data_args.mask_history:
            encoded_pairs = encoded_pairs[::-1]  # high priority for last turns

        for turn_idx, (source_ids, target_ids) in enumerate(encoded_pairs):
            # Stop once the length budget is exhausted; later (lower-priority) turns are dropped.
            if total_length >= self.data_args.cutoff_len:
                break

            # Split the remaining budget proportionally between this turn's source and target.
            source_len, target_len = infer_seqlen(
                len(source_ids), len(target_ids), self.data_args.cutoff_len - total_length
            )
            source_ids = source_ids[:source_len]
            target_ids = target_ids[:target_len]
            total_length += source_len + target_len

            if self.data_args.train_on_prompt:
                source_label = source_ids
            elif self.template.efficient_eos and turn_idx != 0:
                # Keep the EOS that leads a non-first source segment trainable; mask the rest.
                source_label = [self.tokenizer.eos_token_id] + [IGNORE_INDEX] * (source_len - 1)
            else:
                source_label = [IGNORE_INDEX] * source_len

            if self.data_args.mask_history and turn_idx != 0:  # train on the last turn only
                target_label = [IGNORE_INDEX] * target_len
            else:
                target_label = target_ids

            if self.data_args.mask_history:  # reversed sequences
                # Pairs were iterated newest-first, so prepend to restore chronological order.
                input_ids = source_ids + target_ids + input_ids
                labels = source_label + target_label + labels
            else:
                input_ids += source_ids + target_ids
                labels += source_label + target_label

        if self.template.efficient_eos:
            input_ids += [self.tokenizer.eos_token_id]
            labels += [self.tokenizer.eos_token_id]

        return input_ids, labels

    def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]:
        """Batched map function: encode each example column-wise into model inputs.

        Drops examples whose prompt does not have an odd number of messages (i.e. not
        alternating user/assistant ending with user) or whose response is not exactly one message.
        """
        # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
        # for multiturn examples, we only mask the prompt part in each prompt-response pair.
        model_inputs = defaultdict(list)
        for i in range(len(examples["_prompt"])):
            if len(examples["_prompt"][i]) % 2 != 1 or len(examples["_response"][i]) != 1:
                logger.warning_rank0(
                    "Dropped invalid example: {}".format(examples["_prompt"][i] + examples["_response"][i])
                )
                continue

            input_ids, labels = self._encode_data_example(
                prompt=examples["_prompt"][i],
                response=examples["_response"][i],
                system=examples["_system"][i],
                tools=examples["_tools"][i],
                images=examples["_images"][i] or [],
                videos=examples["_videos"][i] or [],
                audios=examples["_audios"][i] or [],
            )
            model_inputs["input_ids"].append(input_ids)
            model_inputs["attention_mask"].append([1] * len(input_ids))
            model_inputs["labels"].append(labels)
            model_inputs["images"].append(examples["_images"][i])
            model_inputs["videos"].append(examples["_videos"][i])
            model_inputs["audios"].append(examples["_audios"][i])

        return model_inputs

    def print_data_example(self, example: dict[str, list[int]]) -> None:
        """Pretty-print one encoded example (ids plus decoded text) for debugging."""
        # Only decode label positions that are actually trained on (not IGNORE_INDEX).
        valid_labels = list(filter(lambda x: x != IGNORE_INDEX, example["labels"]))
        print("input_ids:\n{}".format(example["input_ids"]))
        print("inputs:\n{}".format(self.tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
        print("label_ids:\n{}".format(example["labels"]))
        print(f"labels:\n{self.tokenizer.decode(valid_labels, skip_special_tokens=False)}")


@dataclass
class PackedSupervisedDatasetProcessor(SupervisedDatasetProcessor):
    """Supervised processor that packs several short examples into fixed-length sequences."""

    def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]:
        """Batched map function: encode, then pack examples into ``cutoff_len + 1``-token sequences.

        Examples longer than ``cutoff_len`` are dropped. ``greedy_knapsack`` groups the
        remaining lengths; each knapsack becomes one padded, fixed-length training sequence.
        With ``neat_packing`` the attention mask holds a distinct segment id (1, 2, ...) per
        packed example so segments do not attend to each other.
        """
        # TODO: use `position_ids` to achieve packing
        # build inputs with format `<bos> X1 Y1 <eos> <bos> X2 Y2 <eos>`
        # and labels with format `<ignore> ... <ignore> Y1 <eos> <ignore> ... <ignore> Y2 <eos>`
        valid_num = 0
        batch_input_ids, batch_labels, batch_images, batch_videos, batch_audios = [], [], [], [], []
        lengths = []
        # Maps a sequence length to the indices of encoded examples having that length,
        # so knapsack results (which are lengths) can be resolved back to examples.
        length2indexes = defaultdict(list)
        for i in range(len(examples["_prompt"])):
            if len(examples["_prompt"][i]) % 2 != 1 or len(examples["_response"][i]) != 1:
                logger.warning_rank0(
                    "Dropped invalid example: {}".format(examples["_prompt"][i] + examples["_response"][i])
                )
                continue

            input_ids, labels = self._encode_data_example(
                prompt=examples["_prompt"][i],
                response=examples["_response"][i],
                system=examples["_system"][i],
                tools=examples["_tools"][i],
                images=examples["_images"][i] or [],
                videos=examples["_videos"][i] or [],
                audios=examples["_audios"][i] or [],
            )
            length = len(input_ids)
            if length > self.data_args.cutoff_len:
                logger.warning_rank0(f"Dropped lengthy example with length {length} > {self.data_args.cutoff_len}.")
            else:
                lengths.append(length)
                length2indexes[length].append(valid_num)
                batch_input_ids.append(input_ids)
                batch_labels.append(labels)
                batch_images.append(examples["_images"][i] or [])
                batch_videos.append(examples["_videos"][i] or [])
                batch_audios.append(examples["_audios"][i] or [])
                valid_num += 1

        model_inputs = defaultdict(list)
        knapsacks = greedy_knapsack(lengths, self.data_args.cutoff_len)
        for knapsack in knapsacks:
            packed_input_ids, packed_attention_masks, packed_position_ids, packed_labels = [], [], [], []
            packed_images, packed_videos, packed_audios = [], [], []
            for i, length in enumerate(knapsack):
                index = length2indexes[length].pop()
                packed_input_ids += batch_input_ids[index]
                packed_position_ids += list(range(len(batch_input_ids[index])))  # NOTE: pad_to_multiple_of ignores this
                packed_labels += batch_labels[index]
                packed_images += batch_images[index]
                packed_videos += batch_videos[index]
                packed_audios += batch_audios[index]
                if self.data_args.neat_packing:
                    packed_attention_masks += [i + 1] * len(batch_input_ids[index])  # start from 1
                else:
                    packed_attention_masks += [1] * len(batch_input_ids[index])

            if len(packed_input_ids) < self.data_args.cutoff_len + 1:  # avoid flash_attn drops attn mask
                pad_length = self.data_args.cutoff_len - len(packed_input_ids) + 1
                packed_input_ids += [self.tokenizer.pad_token_id] * pad_length
                packed_position_ids += [0] * pad_length
                packed_labels += [IGNORE_INDEX] * pad_length
                if self.data_args.neat_packing:
                    packed_attention_masks += [0] * pad_length
                else:
                    packed_attention_masks += [1] * pad_length  # more efficient flash_attn

            # Invariant: every packed sequence is exactly cutoff_len + 1 tokens long.
            if len(packed_input_ids) != self.data_args.cutoff_len + 1:
                raise ValueError("The length of packed example should be identical to the cutoff length.")

            model_inputs["input_ids"].append(packed_input_ids)
            model_inputs["attention_mask"].append(packed_attention_masks)
            model_inputs["position_ids"].append(packed_position_ids)
            model_inputs["labels"].append(packed_labels)
            model_inputs["images"].append(packed_images or None)
            model_inputs["videos"].append(packed_videos or None)
            model_inputs["audios"].append(packed_audios or None)

        return model_inputs
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false