Columns: text (string, length 5 to 631k), id (string, length 14 to 178), metadata (dict), __index_level_0__ (int64, 0 to 647)
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import torch from lerobot.configs.types import FeatureType from lerobot.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE from lerobot.processor import VanillaObservationProcessor from lerobot.processor.pipeline import TransitionKey from tests.conftest import assert_contract_is_typed def create_transition( observation=None, action=None, reward=None, done=None, truncated=None, info=None, complementary_data=None ): """Helper to create an EnvTransition dictionary.""" return { TransitionKey.OBSERVATION: observation, TransitionKey.ACTION: action, TransitionKey.REWARD: reward, TransitionKey.DONE: done, TransitionKey.TRUNCATED: truncated, TransitionKey.INFO: info, TransitionKey.COMPLEMENTARY_DATA: complementary_data, } def test_process_single_image(): """Test processing a single image.""" processor = VanillaObservationProcessor() # Create a mock image (H, W, C) format, uint8 image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8) observation = {"pixels": image} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that the image was processed correctly assert "observation.image" in processed_obs processed_img = processed_obs["observation.image"] # Check shape: should be (1, 3, 64, 64) - batch, channels, height, width assert processed_img.shape == (1, 3, 64, 64) # Check dtype and range assert processed_img.dtype == torch.float32 assert processed_img.min() >= 0.0 assert processed_img.max() <= 1.0 def test_process_image_dict(): """Test processing multiple images in a dictionary.""" processor = VanillaObservationProcessor() # Create mock images image1 = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8) image2 = np.random.randint(0, 256, size=(48, 48, 3), dtype=np.uint8) observation = {"pixels": {"camera1": image1, "camera2": image2}} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that both images were processed assert "observation.images.camera1" in processed_obs assert "observation.images.camera2" in processed_obs # Check shapes assert processed_obs["observation.images.camera1"].shape == (1, 3, 32, 32) assert processed_obs["observation.images.camera2"].shape == (1, 3, 48, 48) def test_process_batched_image(): """Test processing already batched images.""" processor = VanillaObservationProcessor() # Create a batched image (B, H, W, C) image = np.random.randint(0, 256, size=(2, 64, 64, 3), dtype=np.uint8) observation = {"pixels": image} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that batch dimension is preserved assert processed_obs["observation.image"].shape == (2, 3, 64, 64) def test_invalid_image_format(): """Test error handling 
for invalid image formats.""" processor = VanillaObservationProcessor() # Test wrong channel order (channels first) image = np.random.randint(0, 256, size=(3, 64, 64), dtype=np.uint8) observation = {"pixels": image} transition = create_transition(observation=observation) with pytest.raises(ValueError, match="Expected channel-last images"): processor(transition) def test_invalid_image_dtype(): """Test error handling for invalid image dtype.""" processor = VanillaObservationProcessor() # Test wrong dtype image = np.random.rand(64, 64, 3).astype(np.float32) observation = {"pixels": image} transition = create_transition(observation=observation) with pytest.raises(ValueError, match="Expected torch.uint8 images"): processor(transition) def test_no_pixels_in_observation(): """Test processor when no pixels are in observation.""" processor = VanillaObservationProcessor() observation = {"other_data": np.array([1, 2, 3])} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Should preserve other data unchanged assert "other_data" in processed_obs np.testing.assert_array_equal(processed_obs["other_data"], np.array([1, 2, 3])) def test_none_observation(): """Test processor with None observation.""" processor = VanillaObservationProcessor() transition = create_transition() result = processor(transition) assert result == transition def test_serialization_methods(): """Test serialization methods.""" processor = VanillaObservationProcessor() # Test get_config config = processor.get_config() assert isinstance(config, dict) # Test state_dict state = processor.state_dict() assert isinstance(state, dict) # Test load_state_dict (should not raise) processor.load_state_dict(state) # Test reset (should not raise) processor.reset() def test_process_environment_state(): """Test processing environment_state.""" processor = VanillaObservationProcessor() env_state = np.array([1.0, 2.0, 3.0], dtype=np.float32) observation = {"environment_state": env_state} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that environment_state was renamed and processed assert "observation.environment_state" in processed_obs assert "environment_state" not in processed_obs processed_state = processed_obs["observation.environment_state"] assert processed_state.shape == (1, 3) # Batch dimension added assert processed_state.dtype == torch.float32 torch.testing.assert_close(processed_state, torch.tensor([[1.0, 2.0, 3.0]])) def test_process_agent_pos(): """Test processing agent_pos.""" processor = VanillaObservationProcessor() agent_pos = np.array([0.5, -0.5, 1.0], dtype=np.float32) observation = {"agent_pos": agent_pos} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that agent_pos was renamed and processed assert "observation.state" in processed_obs assert "agent_pos" not in processed_obs processed_state = processed_obs["observation.state"] assert processed_state.shape == (1, 3) # Batch dimension added assert processed_state.dtype == torch.float32 torch.testing.assert_close(processed_state, torch.tensor([[0.5, -0.5, 1.0]])) def test_process_batched_states(): """Test processing already batched states.""" processor = VanillaObservationProcessor() env_state = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) agent_pos = np.array([[0.5, -0.5], [1.0, -1.0]], 
dtype=np.float32) observation = {"environment_state": env_state, "agent_pos": agent_pos} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that batch dimensions are preserved assert processed_obs["observation.environment_state"].shape == (2, 2) assert processed_obs["observation.state"].shape == (2, 2) def test_process_both_states(): """Test processing both environment_state and agent_pos.""" processor = VanillaObservationProcessor() env_state = np.array([1.0, 2.0], dtype=np.float32) agent_pos = np.array([0.5, -0.5], dtype=np.float32) observation = {"environment_state": env_state, "agent_pos": agent_pos, "other_data": "keep_me"} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that both states were processed assert "observation.environment_state" in processed_obs assert "observation.state" in processed_obs # Check that original keys were removed assert "environment_state" not in processed_obs assert "agent_pos" not in processed_obs # Check that other data was preserved assert processed_obs["other_data"] == "keep_me" def test_no_states_in_observation(): """Test processor when no states are in observation.""" processor = VanillaObservationProcessor() observation = {"other_data": np.array([1, 2, 3])} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Should preserve data unchanged np.testing.assert_array_equal(processed_obs, observation) def test_complete_observation_processing(): """Test processing a complete observation with both images and states.""" processor = VanillaObservationProcessor() # Create mock data image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8) env_state = np.array([1.0, 2.0, 3.0], dtype=np.float32) agent_pos = np.array([0.5, -0.5, 1.0], dtype=np.float32) observation = { "pixels": image, "environment_state": env_state, "agent_pos": agent_pos, "other_data": "preserve_me", } transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that image was processed assert "observation.image" in processed_obs assert processed_obs["observation.image"].shape == (1, 3, 32, 32) # Check that states were processed assert "observation.environment_state" in processed_obs assert "observation.state" in processed_obs # Check that original keys were removed assert "pixels" not in processed_obs assert "environment_state" not in processed_obs assert "agent_pos" not in processed_obs # Check that other data was preserved assert processed_obs["other_data"] == "preserve_me" def test_image_only_processing(): """Test processing observation with only images.""" processor = VanillaObservationProcessor() image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8) observation = {"pixels": image} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] assert "observation.image" in processed_obs assert len(processed_obs) == 1 def test_state_only_processing(): """Test processing observation with only states.""" processor = VanillaObservationProcessor() agent_pos = np.array([1.0, 2.0], dtype=np.float32) observation = {"agent_pos": agent_pos} transition = create_transition(observation=observation) result = processor(transition) processed_obs = 
result[TransitionKey.OBSERVATION] assert "observation.state" in processed_obs assert "agent_pos" not in processed_obs def test_empty_observation(): """Test processing empty observation.""" processor = VanillaObservationProcessor() observation = {} transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] assert processed_obs == {} def test_equivalent_to_original_function(): """Test that ObservationProcessor produces equivalent results to preprocess_observation.""" # Import the original function for comparison from lerobot.envs.utils import preprocess_observation processor = VanillaObservationProcessor() # Create test data similar to what the original function expects image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8) env_state = np.array([1.0, 2.0, 3.0], dtype=np.float32) agent_pos = np.array([0.5, -0.5, 1.0], dtype=np.float32) observation = {"pixels": image, "environment_state": env_state, "agent_pos": agent_pos} # Process with original function original_result = preprocess_observation(observation) # Process with new processor transition = create_transition(observation=observation) processor_result = processor(transition)[TransitionKey.OBSERVATION] # Compare results assert set(original_result.keys()) == set(processor_result.keys()) for key in original_result: torch.testing.assert_close(original_result[key], processor_result[key]) def test_equivalent_with_image_dict(): """Test equivalence with dictionary of images.""" from lerobot.envs.utils import preprocess_observation processor = VanillaObservationProcessor() # Create test data with multiple cameras image1 = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8) image2 = np.random.randint(0, 256, size=(48, 48, 3), dtype=np.uint8) agent_pos = np.array([1.0, 2.0], dtype=np.float32) observation = {"pixels": {"cam1": image1, "cam2": image2}, "agent_pos": agent_pos} # Process with original function original_result = preprocess_observation(observation) # Process with new processor transition = create_transition(observation=observation) processor_result = processor(transition)[TransitionKey.OBSERVATION] # Compare results assert set(original_result.keys()) == set(processor_result.keys()) for key in original_result: torch.testing.assert_close(original_result[key], processor_result[key]) def test_image_processor_feature_contract_pixels_to_image(policy_feature_factory): processor = VanillaObservationProcessor() features = { "pixels": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)), "keep": policy_feature_factory(FeatureType.ENV, (1,)), } out = processor.feature_contract(features.copy()) assert OBS_IMAGE in out and out[OBS_IMAGE] == features["pixels"] assert "pixels" not in out assert out["keep"] == features["keep"] assert_contract_is_typed(out) def test_image_processor_feature_contract_observation_pixels_to_image(policy_feature_factory): processor = VanillaObservationProcessor() features = { "observation.pixels": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)), "keep": policy_feature_factory(FeatureType.ENV, (1,)), } out = processor.feature_contract(features.copy()) assert OBS_IMAGE in out and out[OBS_IMAGE] == features["observation.pixels"] assert "observation.pixels" not in out assert out["keep"] == features["keep"] assert_contract_is_typed(out) def test_image_processor_feature_contract_multi_camera_and_prefixed(policy_feature_factory): processor = VanillaObservationProcessor() features = { "pixels.front": 
policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)), "pixels.wrist": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)), "observation.pixels.rear": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)), "keep": policy_feature_factory(FeatureType.ENV, (7,)), } out = processor.feature_contract(features.copy()) assert f"{OBS_IMAGES}.front" in out and out[f"{OBS_IMAGES}.front"] == features["pixels.front"] assert f"{OBS_IMAGES}.wrist" in out and out[f"{OBS_IMAGES}.wrist"] == features["pixels.wrist"] assert f"{OBS_IMAGES}.rear" in out and out[f"{OBS_IMAGES}.rear"] == features["observation.pixels.rear"] assert "pixels.front" not in out and "pixels.wrist" not in out and "observation.pixels.rear" not in out assert out["keep"] == features["keep"] assert_contract_is_typed(out) def test_state_processor_feature_contract_environment_and_agent_pos(policy_feature_factory): processor = VanillaObservationProcessor() features = { "environment_state": policy_feature_factory(FeatureType.STATE, (3,)), "agent_pos": policy_feature_factory(FeatureType.STATE, (7,)), "keep": policy_feature_factory(FeatureType.ENV, (1,)), } out = processor.feature_contract(features.copy()) assert OBS_ENV_STATE in out and out[OBS_ENV_STATE] == features["environment_state"] assert OBS_STATE in out and out[OBS_STATE] == features["agent_pos"] assert "environment_state" not in out and "agent_pos" not in out assert out["keep"] == features["keep"] assert_contract_is_typed(out) def test_state_processor_feature_contract_prefixed_inputs(policy_feature_factory): proc = VanillaObservationProcessor() features = { "observation.environment_state": policy_feature_factory(FeatureType.STATE, (2,)), "observation.agent_pos": policy_feature_factory(FeatureType.STATE, (4,)), } out = proc.feature_contract(features.copy()) assert OBS_ENV_STATE in out and out[OBS_ENV_STATE] == features["observation.environment_state"] assert OBS_STATE in out and out[OBS_STATE] == features["observation.agent_pos"] assert "environment_state" not in out and "agent_pos" not in out assert_contract_is_typed(out)
lerobot/tests/processor/test_observation_processor.py/0
{ "file_path": "lerobot/tests/processor/test_observation_processor.py", "repo_id": "lerobot", "token_count": 6090 }
193
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import numpy as np import pytest import torch from lerobot.utils.random_utils import ( deserialize_numpy_rng_state, deserialize_python_rng_state, deserialize_rng_state, deserialize_torch_rng_state, get_rng_state, seeded_context, serialize_numpy_rng_state, serialize_python_rng_state, serialize_rng_state, serialize_torch_rng_state, set_rng_state, set_seed, ) @pytest.fixture def fixed_seed(): """Fixture to set a consistent initial seed for each test.""" set_seed(12345) yield def test_serialize_deserialize_python_rng(fixed_seed): # Save state after generating val1 _ = random.random() st = serialize_python_rng_state() # Next random is val2 val2 = random.random() # Restore the state, so the next random should match val2 deserialize_python_rng_state(st) val3 = random.random() assert val2 == val3 def test_serialize_deserialize_numpy_rng(fixed_seed): _ = np.random.rand() st = serialize_numpy_rng_state() val2 = np.random.rand() deserialize_numpy_rng_state(st) val3 = np.random.rand() assert val2 == val3 def test_serialize_deserialize_torch_rng(fixed_seed): _ = torch.rand(1).item() st = serialize_torch_rng_state() val2 = torch.rand(1).item() deserialize_torch_rng_state(st) val3 = torch.rand(1).item() assert val2 == val3 def test_serialize_deserialize_rng(fixed_seed): # Generate one from each library _ = random.random() _ = np.random.rand() _ = torch.rand(1).item() # Serialize st = serialize_rng_state() # Generate second set val_py2 = random.random() val_np2 = np.random.rand() val_th2 = torch.rand(1).item() # Restore, so the next draws should match val_py2, val_np2, val_th2 deserialize_rng_state(st) assert random.random() == val_py2 assert np.random.rand() == val_np2 assert torch.rand(1).item() == val_th2 def test_get_set_rng_state(fixed_seed): st = get_rng_state() val1 = (random.random(), np.random.rand(), torch.rand(1).item()) # Change states random.random() np.random.rand() torch.rand(1) # Restore set_rng_state(st) val2 = (random.random(), np.random.rand(), torch.rand(1).item()) assert val1 == val2 def test_set_seed(): set_seed(1337) val1 = (random.random(), np.random.rand(), torch.rand(1).item()) set_seed(1337) val2 = (random.random(), np.random.rand(), torch.rand(1).item()) assert val1 == val2 def test_seeded_context(fixed_seed): val1 = (random.random(), np.random.rand(), torch.rand(1).item()) with seeded_context(1337): seeded_val1 = (random.random(), np.random.rand(), torch.rand(1).item()) val2 = (random.random(), np.random.rand(), torch.rand(1).item()) with seeded_context(1337): seeded_val2 = (random.random(), np.random.rand(), torch.rand(1).item()) assert seeded_val1 == seeded_val2 assert all(a != b for a, b in zip(val1, seeded_val1, strict=True)) # changed inside the context assert all(a != b for a, b in zip(val2, seeded_val2, strict=True)) # changed again after exiting
lerobot/tests/utils/test_random_utils.py/0
{ "file_path": "lerobot/tests/utils/test_random_utils.py", "repo_id": "lerobot", "token_count": 1453 }
194
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional from open_r1.utils.evaluation import SUPPORTED_BENCHMARKS, run_benchmark_jobs from open_r1.configs import SFTConfig from trl import ModelConfig, TrlParser @dataclass class ScriptArguments: model_id: str = field( default="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", metadata={"help": "The Hub model id to push the model to."}, ) model_revision: str = field(default="main", metadata={"help": "The Hub model branch to push the model to."}) trust_remote_code: bool = field(default=False, metadata={"help": "Trust the remote code."}) benchmarks: List[str] = field( default_factory=lambda: [], metadata={"help": "The benchmarks to run after training."} ) list_benchmarks: bool = field(default=False, metadata={"help": "List all supported benchmarks."}) system_prompt: Optional[str] = field( default=None, metadata={"help": "The system prompt to use for the benchmark."} ) def main(): parser = TrlParser(ScriptArguments) args = parser.parse_args_and_config()[0] if args.list_benchmarks: print("Supported benchmarks:") for benchmark in SUPPORTED_BENCHMARKS: print(f" - {benchmark}") return benchmark_args = SFTConfig( output_dir="", hub_model_id=args.model_id, hub_model_revision=args.model_revision, benchmarks=args.benchmarks, system_prompt=args.system_prompt, ) run_benchmark_jobs( benchmark_args, ModelConfig(model_name_or_path="", model_revision="", trust_remote_code=args.trust_remote_code), ) if __name__ == "__main__": main()
open-r1/scripts/run_benchmarks.py/0
{ "file_path": "open-r1/scripts/run_benchmarks.py", "repo_id": "open-r1", "token_count": 815 }
195
#!/bin/bash #SBATCH --job-name=open_r1 #SBATCH --ntasks-per-node=1 #SBATCH --exclusive #SBATCH --gres=gpu:8 #SBATCH --partition=hopper-prod # Adjust this for your cluster #SBATCH --output=./logs/%x-%j.out #SBATCH --error=./logs/%x-%j.err #SBATCH --requeue #SBATCH --time=3-00:00:00 if [[ "$*" == *"--help"* ]]; then echo "Usage: sbatch slurm/train.slurm [options]" echo "Options:" echo " --model MODEL Model name" echo " --task TASK Task name (e.g. sft, grpo)" echo " --config SUFFIX Configuration suffix (e.g. demo, v00.00)" echo " --accelerator CONFIG Accelerator configuration name (e.g. zero3)" echo " --dp N Data parallelism for vLLM server (default: 1)" echo " --tp N Tensor parallelism for vLLM server (default: 1)" echo " --args \"ARGS\" Optional arguments to pass to the training script" exit 0 fi # Specific configuration optimized for the Hugging Face Compute Cluster module load cuda/12.4 set -x -e source ~/.bashrc source openr1/bin/activate START_TIME=$(date +%s) echo "START TIME: $(date)" # Refresh Weka on h4 cache echo "Refreshing Weka filesystem..." find -L /fsx/h4/ -type f | xargs -d '\n' -r -n512 -P64 weka fs tier fetch # Default values MODEL="" TASK="" CONFIG_SUFFIX="" ACCELERATOR="" DP=1 TP=1 OPTIONAL_ARGS="" # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in --model) MODEL="$2" shift 2 ;; --task) TASK="$2" shift 2 ;; --config) CONFIG_SUFFIX="$2" shift 2 ;; --accelerator) ACCELERATOR="$2" shift 2 ;; --dp) DP="$2" shift 2 ;; --tp) TP="$2" shift 2 ;; --args) OPTIONAL_ARGS="$2" shift 2 ;; *) echo "Unknown option: $1" echo "Use --help for usage information" exit 1 ;; esac done # Validate required arguments if [[ -z "$MODEL" || -z "$TASK" || -z "$CONFIG_SUFFIX" || -z "$ACCELERATOR" ]]; then echo "Error: Missing required arguments" echo "Run with --help for usage information" exit 1 fi CONFIG_FILE=recipes/$MODEL/$TASK/config_$CONFIG_SUFFIX.yaml GRAD_ACC_STEPS=$(grep 'gradient_accumulation_steps' $CONFIG_FILE | awk '{print $2}') # Split the string into individual arguments IFS=' ' read -ra ARGS <<< "$OPTIONAL_ARGS" # Loop through the arguments and find the one with "--gradient_accumulation_steps" for arg in "${ARGS[@]}"; do if [[ "$arg" == "--gradient_accumulation_steps="* ]]; then # Extract the value after the equals sign GRAD_ACC_STEPS="${arg#*=}" break # Exit the loop once we find the desired argument fi done echo "Gradient accumulation steps: $GRAD_ACC_STEPS" MODEL=$(grep 'model_name_or_path:' $CONFIG_FILE | awk '{print $2}') REVISION=$(grep 'model_revision:' $CONFIG_FILE | head -n 1 | awk '{print $2}') # Distributed configuration NUM_NODES=$SLURM_NNODES GPUS_PER_NODE=8 WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE)) NODELIST=($(scontrol show hostnames $SLURM_JOB_NODELIST)) MASTER_ADDR=${NODELIST[0]} # First node for main process MASTER_PORT=6000 TRAIN_NODES=("${NODELIST[@]}") USE_VLLM="false" if [[ -f "$CONFIG_FILE" ]] && grep -qE '^\s*use_vllm:\s*true' "$CONFIG_FILE"; then USE_VLLM="true" fi # if using vllm if [[ "$USE_VLLM" == "true" ]]; then TRAIN_NODES=("${NODELIST[@]:0:$((NUM_NODES - 1))}") VLLM_NODE=${NODELIST[-1]} # Last node WORLD_SIZE=$((WORLD_SIZE - GPUS_PER_NODE)) NUM_NODES=$((NUM_NODES - 1)) srun --nodes=1 --ntasks=1 --nodelist=$VLLM_NODE trl vllm-serve --model $MODEL --revision $REVISION --tensor_parallel_size $TP --data_parallel_size $DP & OPTIONAL_ARGS="$OPTIONAL_ARGS --vllm_server_host=$VLLM_NODE" fi # force crashing on nccl issues like hanging broadcast export NCCL_ASYNC_ERROR_HANDLING=1 # export NCCL_DEBUG=INFO # export NCCL_DEBUG_SUBSYS=COLL # export 
NCCL_SOCKET_NTHREADS=1 # export NCCL_NSOCKS_PERTHREAD=1 # export CUDA_LAUNCH_BLOCKING=1 export CMD=" \ src/open_r1/$TASK.py --config $CONFIG_FILE $OPTIONAL_ARGS " export LAUNCHER="ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \ --config_file recipes/accelerate_configs/$ACCELERATOR.yaml \ --gradient_accumulation_steps $GRAD_ACC_STEPS \ --num_machines $NUM_NODES \ --num_processes $WORLD_SIZE \ --main_process_ip $MASTER_ADDR \ --main_process_port $MASTER_PORT \ --machine_rank $SLURM_PROCID \ --rdzv_backend=c10d \ --max_restarts 1 \ --tee 3 \ " # srun error handling: # --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks # --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code NODELIST=$(IFS=,; echo "${TRAIN_NODES[*]}") SRUN_ARGS=" \ --wait=60 \ --kill-on-bad-exit=1 \ --nodes=$NUM_NODES \ --ntasks=$NUM_NODES \ --nodelist=$NODELIST " srun $SRUN_ARGS bash -c "$LAUNCHER $CMD" 2>&1 END_TIME=$(date +%s) echo "END TIME: $(date)" ELAPSED_SECONDS=$((END_TIME - START_TIME)) HOURS=$((ELAPSED_SECONDS / 3600)) MINUTES=$(( (ELAPSED_SECONDS % 3600) / 60 )) SECONDS=$((ELAPSED_SECONDS % 60)) echo "TOTAL JOB TIME: ${HOURS}h ${MINUTES}m ${SECONDS}s (${ELAPSED_SECONDS} seconds)"
open-r1/slurm/train.slurm/0
{ "file_path": "open-r1/slurm/train.slurm", "repo_id": "open-r1", "token_count": 2351 }
196
import asyncio import os import random import re import subprocess from collections import Counter from functools import lru_cache import aiohttp class PistonError(Exception): pass @lru_cache(maxsize=1) def get_piston_client_from_env(session=None): piston_endpoints = os.getenv("PISTON_ENDPOINTS") if piston_endpoints is None: raise ValueError( "For IOI/CF problems Piston endpoints running our IOI package are required. Please add a list of valid Piston endpoints to a PISTON_ENDPOINTS variable in a `.env` file." ) piston_endpoints = sorted( piston_endpoints.split(",") if piston_endpoints != "slurm" else get_slurm_piston_endpoints() ) gpu_nb = int(os.getenv("LOCAL_RANK", 0)) # per‑GPU index world = int(os.getenv("WORLD_SIZE", 1)) # total GPUs if world > 1: print(f"Using a subset of piston endpoints for GPU#{gpu_nb}") piston_endpoints = piston_endpoints[gpu_nb::world] random.shuffle(piston_endpoints) max_requests_per_endpoint = os.getenv("PISTON_MAX_REQUESTS_PER_ENDPOINT", "1") return PistonClient(piston_endpoints, session, max_requests_per_endpoint=int(max_requests_per_endpoint)) class PistonClient: """ A client that will automatically load balance across multiple Piston (https://github.com/engineer-man/piston) workers. This assumes piston is running our custom cms_ioi package: https://github.com/guipenedo/piston/releases/ We recommend starting the instances with the following script as otherwise some IOI problems will hit default limits: ``` export PISTON_COMPILE_TIMEOUT=60000 export PISTON_RUN_TIMEOUT=60000 export PISTON_OUTPUT_MAX_SIZE=1000000000 export PISTON_MAX_FILE_SIZE=1000000000 export PISTON_DISABLE_NETWORKING=true export PISTON_REPO_URL=https://github.com/guipenedo/piston/releases/download/pkgs/index mkdir /piston sed -i '/app.use(body_parser.urlencoded/c\ app.use(body_parser.urlencoded({ extended: true, limit: \"512mb\" }));' src/index.js sed -i '/app.use(body_parser.json/c\ app.use(body_parser.json({ limit: \"512mb\" }));' src/index.js # Start server in background node src``` Piston docs for API usage: https://piston.readthedocs.io/en/latest/api-v2/ """ def __init__( self, base_endpoint: str | list[str] = "http://ip-10-53-80-65:3223/api/v2", session=None, max_requests_per_endpoint=1, ): self.max_requests_per_endpoint = max_requests_per_endpoint self.base_endpoints = [base_endpoint] if isinstance(base_endpoint, str) else base_endpoint if len(self.base_endpoints) == 0: raise ValueError("No Piston endpoints provided. 
Please check your PISTON_ENDPOINTS environment variable.") self.endpoint_ids = {endpoint: i for i, endpoint in enumerate(self.base_endpoints)} self._session = session self.endpoint_tokens = asyncio.Queue(maxsize=max_requests_per_endpoint * len(self.base_endpoints)) for _ in range(max_requests_per_endpoint): for base_endpoint in self.base_endpoints: self.endpoint_tokens.put_nowait(base_endpoint) self._endpoint_failures = Counter() self._unhealthy_endpoints = set() self._endpoint_failures_lock = asyncio.Lock() @property def session(self): if self._session is None: self._session = aiohttp.ClientSession( timeout=aiohttp.ClientTimeout(sock_read=30), connector=aiohttp.TCPConnector( limit=self.max_requests_per_endpoint * len(self.base_endpoints), ttl_dns_cache=300, keepalive_timeout=5 * 60, ), ) return self._session async def _wait_for_endpoint(self): endpoint = await self.endpoint_tokens.get() return endpoint async def _release_endpoint(self, endpoint): await self.endpoint_tokens.put(endpoint) async def _send_request(self, endpoint, route, data=None, method="post"): async with self.session.request( method, f"{endpoint.rstrip('/')}/{route}", json=data, headers={"Content-Type": "application/json"} ) as response: return await response.json(content_type=None) async def _send_to_all(self, route, data=None, method="post"): return await asyncio.gather( *[self._send_request(endpoint, route, data, method) for endpoint in self.base_endpoints] ) async def _send_to_one(self, endpoint, route, data=None, method="post"): return await self._send_request(endpoint, route, data, method) async def install_package(self, language, version): return await self._send_to_all("packages", {"language": language, "version": version}, method="post") async def uninstall_package(self, language, version): return await self._send_to_all("packages", {"language": language, "version": version}, method="delete") async def get_supported_runtimes(self): return await self._send_to_all("runtimes", method="get") async def _check_failed_endpoint(self, endpoint): async with self._endpoint_failures_lock: if endpoint in self._unhealthy_endpoints: return try: await asyncio.sleep(5) await self.get_supported_runtimes() except Exception as e: print(f"Error checking endpoint {endpoint}, dropping it ({e})") self._unhealthy_endpoints.add(endpoint) if len(self._unhealthy_endpoints) >= len(self.base_endpoints): raise PistonError("All endpoints are unhealthy. Please check your Piston workers.") async def send_execute(self, data, language="cms_ioi", max_retries=5): data = data | { "language": language, "version": "*", } base_delay = 1.0 status = None endpoint = None for attempt in range(max_retries + 1): try: endpoint = await self._wait_for_endpoint() if attempt > 0: await asyncio.sleep(1) async with self.session.post( f"{endpoint.rstrip('/')}/execute", json=data, headers={"Content-Type": "application/json"} ) as response: status = response.status res_json = await response.json(content_type=None) if status != 200: raise PistonError(f"Server error. status={status}. {res_json}") if res_json is None: raise PistonError(f"Empty response. 
status={status}") # piston overloaded if "run" in res_json and "Resource temporarily unavailable" in res_json["run"].get("stderr", ""): raise PistonError(f"Piston overloaded: {res_json['run']['stderr']}") return res_json except (PistonError, asyncio.TimeoutError, aiohttp.ClientConnectionError, RuntimeError) as e: # Only retry if we haven't reached max retries yet if attempt < max_retries: # Calculate backoff with jitter delay = min(base_delay * (2**attempt), 10) # Exponential backoff, capped at 10 seconds jitter = delay * 0.2 * (2 * asyncio.get_event_loop().time() % 1 - 0.5) # Add ±10% jitter retry_delay = delay + jitter print(f"Retrying in {retry_delay:.2f} seconds [{self.endpoint_ids[endpoint]}] {endpoint} - {e}") # special case: worker died if isinstance(e, aiohttp.ClientConnectionError) and "Connect call failed" in str(e): await self._check_failed_endpoint(endpoint) else: # hopefully we won't get this one again await self._release_endpoint(endpoint) endpoint = None await asyncio.sleep(retry_delay) else: await self._check_failed_endpoint(endpoint) except Exception as e: print(f"Propagating exception {type(e)}: {e}") raise e finally: # Ensure endpoint is always released, even if an exception occurs if endpoint is not None: try: await self._release_endpoint(endpoint) except Exception as e: print(f"Error releasing endpoint {endpoint}: {e}") endpoint = None def get_slurm_piston_endpoints(): """Get list of active piston worker endpoints from squeue output""" # Run squeue command to get job name, hostname and status, filtering for RUNNING state result = subprocess.run( ["squeue", '--format="%j %N %T"', "--noheader", "--states=RUNNING"], capture_output=True, text=True ) # Split output into lines and skip header lines = result.stdout.strip().split("\n") endpoints = [] for line in lines: # Parse job name from squeue output fields = line.split() job_name = fields[0].strip('"') # Remove quotes hostname = fields[1] # Extract port if job name matches pattern match = re.match(r"piston-worker-(\d+)", job_name) if match: port = match.group(1) endpoints.append(f"http://{hostname}:{port}/api/v2") return endpoints
open-r1/src/open_r1/utils/competitive_programming/piston_client.py/0
{ "file_path": "open-r1/src/open_r1/utils/competitive_programming/piston_client.py", "repo_id": "open-r1", "token_count": 4290 }
197
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <h1 align="center"> <p>🤗 PEFT</p></h1> <h3 align="center"> <p>State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods</p> </h3> Fine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models. PEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models. > [!TIP] > Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the "Watch repos" button on the organization page to be notified of newly implemented methods and notebooks! Check the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work. ## Quickstart Install PEFT from pip: ```bash pip install peft ``` Prepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the Qwen/Qwen2.5-3B-Instruct model used below, you're only training about 0.12% of the parameters! ```python import torch from transformers import AutoModelForCausalLM from peft import LoraConfig, TaskType, get_peft_model device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" model_id = "Qwen/Qwen2.5-3B-Instruct" model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device) peft_config = LoraConfig( r=16, lora_alpha=32, task_type=TaskType.CAUSAL_LM, # target_modules=["q_proj", "v_proj", ...] # optionally indicate target modules ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # prints: trainable params: 3,686,400 || all params: 3,089,625,088 || trainable%: 0.1193 # now perform training on your dataset, e.g. 
using transformers Trainer, then save the model model.save_pretrained("qwen2.5-3b-lora") ``` To load a PEFT model for inference: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer from peft import PeftModel device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" model_id = "Qwen/Qwen2.5-3B-Instruct" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device) model = PeftModel.from_pretrained(model, "qwen2.5-3b-lora") inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt") outputs = model.generate(**inputs.to(device), max_new_tokens=50) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) # prints something like: Preheat the oven to 350 degrees and place the cookie dough in a baking dish [...] ``` ## Why you should use PEFT There are many benefits of using PEFT, but the main one is the huge savings in compute and storage, making PEFT applicable to many different use cases. ### High performance on consumer hardware Consider the memory requirements for training the following models on the [ought/raft/twitter_complaints](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) dataset with an A100 80GB GPU with more than 64GB of CPU RAM. | Model | Full Finetuning | PEFT-LoRA PyTorch | PEFT-LoRA DeepSpeed with CPU Offloading | | --------- | ---- | ---- | ---- | | bigscience/T0_3B (3B params) | 47.14GB GPU / 2.96GB CPU | 14.4GB GPU / 2.96GB CPU | 9.8GB GPU / 17.8GB CPU | | bigscience/mt0-xxl (12B params) | OOM GPU | 56GB GPU / 3GB CPU | 22GB GPU / 52GB CPU | | bigscience/bloomz-7b1 (7B params) | OOM GPU | 32GB GPU / 3.8GB CPU | 18.1GB GPU / 35GB CPU | With LoRA you can finetune a 12B parameter model that would've otherwise run out of memory on the 80GB GPU, and comfortably fit and train a 3B parameter model. When you look at the 3B parameter model's performance, it is comparable to a fully finetuned model at a fraction of the GPU memory. | Submission Name | Accuracy | | --------- | ---- | | Human baseline (crowdsourced) | 0.897 | | Flan-T5 | 0.892 | | lora-t0-3b | 0.863 | > [!TIP] > The bigscience/T0_3B model performance isn't optimized in the table above. You can squeeze even more performance out of it by playing around with the input instruction templates, LoRA hyperparameters, and other training-related hyperparameters. The final checkpoint size of this model is just 19MB compared to 11GB of the full bigscience/T0_3B model. Learn more about the advantages of finetuning with PEFT in this [blog post](https://www.philschmid.de/fine-tune-flan-t5-peft). ### Quantization Quantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference. * Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post. 
* Learn how to finetune an [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset). ### Save compute and storage PEFT can help you save storage by avoiding full finetuning of models on each downstream task or dataset. In many cases, you're only finetuning a very small fraction of a model's parameters and each checkpoint is only a few MBs in size (instead of GBs). These smaller PEFT adapters demonstrate performance comparable to a fully finetuned model. If you have many datasets, you can save a lot of storage with a PEFT model and not have to worry about catastrophic forgetting or overfitting the backbone or base model. ## PEFT integrations PEFT is widely supported across the Hugging Face ecosystem because of the massive efficiency it brings to training and inference. ### Diffusers The iterative diffusion process consumes a lot of memory, which can make it difficult to train. PEFT can help reduce the memory requirements and reduce the storage size of the final model checkpoint. For example, consider the memory required for training a Stable Diffusion model with LoRA on an A100 80GB GPU with more than 64GB of CPU RAM. The final model checkpoint size is only 8.8MB! | Model | Full Finetuning | PEFT-LoRA | PEFT-LoRA with Gradient Checkpointing | | --------- | ---- | ---- | ---- | | CompVis/stable-diffusion-v1-4 | 27.5GB GPU / 3.97GB CPU | 15.5GB GPU / 3.84GB CPU | 8.12GB GPU / 3.77GB CPU | > [!TIP] > Take a look at the [examples/lora_dreambooth/train_dreambooth.py](examples/lora_dreambooth/train_dreambooth.py) training script to try training your own Stable Diffusion model with LoRA, and play around with the [smangrul/peft-lora-sd-dreambooth](https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth) Space which is running on a T4 instance. Learn more about the PEFT integration in Diffusers in this [tutorial](https://huggingface.co/docs/peft/main/en/tutorial/peft_integrations#diffusers). ### Transformers PEFT is directly integrated with [Transformers](https://huggingface.co/docs/transformers/main/en/peft). After loading a model, call `add_adapter` to add a new PEFT adapter to the model: ```python from peft import LoraConfig model = ... # transformers model peft_config = LoraConfig(...) model.add_adapter(peft_config, adapter_name="lora_1") ``` To load a trained PEFT adapter, call `load_adapter`: ```python model = ... # transformers model model.load_adapter(<path-to-adapter>, adapter_name="lora_1") ``` And to switch between different adapters, call `set_adapter`: ```python model.set_adapter("lora_2") ``` The Transformers integration doesn't include all the functionalities offered in PEFT, such as methods for merging the adapter into the base model. ### Accelerate [Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it convenient to train very large models or use them for inference on consumer hardware with limited resources. ### TRL PEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. 
Get started by reading: * [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to an LLM. * [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews. * [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning. ## Model support Use this [Space](https://stevhliu-peft-methods.hf.space) or check out the [docs](https://huggingface.co/docs/peft/main/en/index) to find which models officially support a PEFT method out of the box. Even if you don't see a model listed there, you can manually configure the model config to enable PEFT for a model. Read the [New transformers architecture](https://huggingface.co/docs/peft/main/en/developer_guides/custom_models#new-transformers-architectures) guide to learn how. ## Contribute If you would like to contribute to PEFT, please check out our [contribution guide](https://huggingface.co/docs/peft/developer_guides/contributing). ## Citing 🤗 PEFT To use 🤗 PEFT in your publication, please cite it by using the following BibTeX entry. ```bibtex @Misc{peft, title = {{PEFT}: State-of-the-art Parameter-Efficient Fine-Tuning methods}, author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan}, howpublished = {\url{https://github.com/huggingface/peft}}, year = {2022} } ```
peft/README.md/0
{ "file_path": "peft/README.md", "repo_id": "peft", "token_count": 3732 }
198
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PEFT checkpoint format This document describes how PEFT's checkpoint files are structured and how to convert between the PEFT format and other formats. ## PEFT files PEFT (parameter-efficient fine-tuning) methods only update a small subset of a model's parameters rather than all of them. This is nice because checkpoint files can generally be much smaller than the original model files and are easier to store and share. However, this also means that to load a PEFT model, you need to have the original model available as well. When you call [`~PeftModel.save_pretrained`] on a PEFT model, the PEFT model saves three files, described below: 1. `adapter_model.safetensors` or `adapter_model.bin` By default, the model is saved in the `safetensors` format, a secure alternative to the `bin` format, which is known to be susceptible to [security vulnerabilities](https://huggingface.co/docs/hub/security-pickle) because it uses the pickle utility under the hood. Both formats store the same `state_dict` though, and are interchangeable. The `state_dict` only contains the parameters of the adapter module, not the base model. To illustrate the difference in size, a normal BERT model requires ~420MB of disk space, whereas an IA³ adapter on top of this BERT model only requires ~260KB. 2. `adapter_config.json` The `adapter_config.json` file contains the configuration of the adapter module, which is necessary to load the model. Below is an example of an `adapter_config.json` for an IA³ adapter with standard settings applied to a BERT model: ```json { "auto_mapping": { "base_model_class": "BertModel", "parent_library": "transformers.models.bert.modeling_bert" }, "base_model_name_or_path": "bert-base-uncased", "fan_in_fan_out": false, "feedforward_modules": [ "output.dense" ], "inference_mode": true, "init_ia3_weights": true, "modules_to_save": null, "peft_type": "IA3", "revision": null, "target_modules": [ "key", "value", "output.dense" ], "task_type": null } ``` The configuration file contains: - the adapter module type stored, `"peft_type": "IA3"` - information about the base model like `"base_model_name_or_path": "bert-base-uncased"` - the revision of the model (if any), `"revision": null` If the base model is not a pretrained Transformers model, the latter two entries will be `null`. Other than that, the settings are all related to the specific IA³ adapter that was used to fine-tune the model. 3. `README.md` The generated `README.md` is the model card of a PEFT model and contains a few pre-filled entries. The intent of this is to make it easier to share the model with others and to provide some basic information about the model. This file is not needed to load the model. 
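As a quick illustration, here is a minimal sketch (assuming a LoRA adapter on a plain BERT base model; the output directory name is arbitrary) that saves an adapter and lists the files it produces:

```python
# Minimal sketch: attach a LoRA adapter to BERT, save it, and inspect the resulting files.
import os

from transformers import AutoModel
from peft import LoraConfig, get_peft_model

base_model = AutoModel.from_pretrained("bert-base-uncased")
peft_model = get_peft_model(base_model, LoraConfig(target_modules=["query", "value"]))
peft_model.save_pretrained("bert-lora-checkpoint")

print(sorted(os.listdir("bert-lora-checkpoint")))
# expected: ['README.md', 'adapter_config.json', 'adapter_model.safetensors']
```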
## Convert to PEFT format When converting from another format to the PEFT format, we require both the `adapter_model.safetensors` (or `adapter_model.bin`) file and the `adapter_config.json` file. ### adapter_model For the model weights, it is important to use the correct mapping from parameter name to value for PEFT to load the file. Getting this mapping right is an exercise in checking the implementation details, as there is no generally agreed upon format for PEFT adapters. Fortunately, figuring out this mapping is not overly complicated for common base cases. Let's look at a concrete example, the [`LoraLayer`](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/layer.py): ```python # showing only part of the code class LoraLayer(BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B") # All names of other parameters that may contain adapter-related parameters other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout") def __init__(self, base_layer: nn.Module, **kwargs) -> None: self.base_layer = base_layer self.r = {} self.lora_alpha = {} self.scaling = {} self.lora_dropout = nn.ModuleDict({}) self.lora_A = nn.ModuleDict({}) self.lora_B = nn.ModuleDict({}) # For Embedding layer self.lora_embedding_A = nn.ParameterDict({}) self.lora_embedding_B = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.use_dora: dict[str, bool] = {} self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA self._caches: dict[str, Any] = {} self.kwargs = kwargs ``` In the `__init__` code used by all `LoraLayer` classes in PEFT, there are a bunch of parameters used to initialize the model, but only a few are relevant for the checkpoint file: `lora_A`, `lora_B`, `lora_embedding_A`, and `lora_embedding_B`. These parameters are listed in the class attribute `adapter_layer_names` and contain the learnable parameters, so they must be included in the checkpoint file. All the other parameters, like the rank `r`, are derived from the `adapter_config.json` and must be included there (unless the default value is used). Let's check the `state_dict` of a PEFT LoRA model applied to BERT. When printing the first five keys using the default LoRA settings (the remaining keys are the same, just with different layer numbers), we get: - `base_model.model.encoder.layer.0.attention.self.query.lora_A.weight` - `base_model.model.encoder.layer.0.attention.self.query.lora_B.weight` - `base_model.model.encoder.layer.0.attention.self.value.lora_A.weight` - `base_model.model.encoder.layer.0.attention.self.value.lora_B.weight` - `base_model.model.encoder.layer.1.attention.self.query.lora_A.weight` - etc. Let's break this down: - By default, for BERT models, LoRA is applied to the `query` and `value` layers of the attention module. This is why you see `attention.self.query` and `attention.self.value` in the key names for each layer. - LoRA decomposes the weights into two low-rank matrices, `lora_A` and `lora_B`. This is where `lora_A` and `lora_B` come from in the key names. - These LoRA matrices are implemented as `nn.Linear` layers, so the parameters are stored in the `.weight` attribute (`lora_A.weight`, `lora_B.weight`). - By default, LoRA isn't applied to BERT's embedding layer, so there are _no entries_ for `lora_A_embedding` and `lora_B_embedding`. - The keys of the `state_dict` always start with `"base_model.model."`. 
The reason is that, in PEFT, we wrap the base model inside a tuner-specific model (`LoraModel` in this case), which itself is wrapped in a general PEFT model (`PeftModel`). For this reason, these two prefixes are added to the keys. When converting to the PEFT format, it is required to add these prefixes. <Tip> This last point is not true for prefix tuning techniques like prompt tuning. There, the extra embeddings are directly stored in the `state_dict` without any prefixes added to the keys. </Tip> When inspecting the parameter names in the loaded model, you might be surprised to find that they look a bit different, e.g. `base_model.model.encoder.layer.0.attention.self.query.lora_A.default.weight`. The difference is the *`.default`* part in the second to last segment. This part exists because PEFT generally allows the addition of multiple adapters at once (using an `nn.ModuleDict` or `nn.ParameterDict` to store them). For example, if you add another adapter called "other", the key for that adapter would be `base_model.model.encoder.layer.0.attention.self.query.lora_A.other.weight`. When you call [`~PeftModel.save_pretrained`], the adapter name is stripped from the keys. The reason is that the adapter name is not an important part of the model architecture; it is just an arbitrary name. When loading the adapter, you could choose a totally different name, and the model would still work the same way. This is why the adapter name is not stored in the checkpoint file. <Tip> If you call `save_pretrained("some/path")` and the adapter name is not `"default"`, the adapter is stored in a sub-directory with the same name as the adapter. So if the name is "other", it would be stored inside of `some/path/other`. </Tip> In some circumstances, deciding which values to add to the checkpoint file can become a bit more complicated. For example, in PEFT, DoRA is implemented as a special case of LoRA. If you want to convert a DoRA model to PEFT, you should create a LoRA checkpoint with extra entries for DoRA. You can see this in the `__init__` of the previous `LoraLayer` code: ```python self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA ``` This indicates that there is an optional extra parameter per layer for DoRA. ### adapter_config All the other information needed to load a PEFT model is contained in the `adapter_config.json` file. Let's check this file for a LoRA model applied to BERT: ```json { "alpha_pattern": {}, "auto_mapping": { "base_model_class": "BertModel", "parent_library": "transformers.models.bert.modeling_bert" }, "base_model_name_or_path": "bert-base-uncased", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layer_replication": null, "layers_pattern": null, "layers_to_transform": null, "loftq_config": {}, "lora_alpha": 8, "lora_dropout": 0.0, "megatron_config": null, "megatron_core": "megatron.core", "modules_to_save": null, "peft_type": "LORA", "r": 8, "rank_pattern": {}, "revision": null, "target_modules": [ "query", "value" ], "task_type": null, "use_dora": false, "use_rslora": false } ``` This contains a lot of entries, and at first glance, it could feel overwhelming to figure out all the right values to put in there. However, most of the entries are not necessary to load the model. This is either because they use the default values and don't need to be added or because they only affect the initialization of the LoRA weights, which is irrelevant when it comes to loading the model. 
If you find that you don't know what a specific parameter does, e.g., `"use_rslora",` don't add it, and you should be fine. Also note that as more options are added, this file will get more entries in the future, but it should be backward compatible. At the minimum, you should include the following entries: ```json { "target_modules": ["query", "value"], "peft_type": "LORA" } ``` However, adding as many entries as possible, like the rank `r` or the `base_model_name_or_path` (if it's a Transformers model) is recommended. This information can help others understand the model better and share it more easily. To check which keys and values are expected, check out the [config.py](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/config.py) file (as an example, this is the config file for LoRA) in the PEFT source code. ## Model storage In some circumstances, you might want to store the whole PEFT model, including the base weights. This can be necessary if, for instance, the base model is not available to the users trying to load the PEFT model. You can merge the weights first or convert it into a Transformer model. ### Merge the weights The most straightforward way to store the whole PEFT model is to merge the adapter weights into the base weights: ```python merged_model = model.merge_and_unload() merged_model.save_pretrained(...) ``` There are some disadvantages to this approach, though: - Once [`~LoraModel.merge_and_unload`] is called, you get a basic model without any PEFT-specific functionality. This means you can't use any of the PEFT-specific methods anymore. - You cannot unmerge the weights, load multiple adapters at once, disable the adapter, etc. - Not all PEFT methods support merging weights. - Some PEFT methods may generally allow merging, but not with specific settings (e.g. when using certain quantization techniques). - The whole model will be much larger than the PEFT model, as it will contain all the base weights as well. But inference with a merged model should be a bit faster. ### Convert to a Transformers model Another way to save the whole model, assuming the base model is a Transformers model, is to use this hacky approach to directly insert the PEFT weights into the base model and save it, which only works if you "trick" Transformers into believing the PEFT model is not a PEFT model. This only works with LoRA because other adapters are not implemented in Transformers. ```python model = ... # the PEFT model ... # after you finish training the model, save it in a temporary location model.save_pretrained(<temp_location>) # now load this model directly into a transformers model, without the PEFT wrapper # the PEFT weights are directly injected into the base model model_loaded = AutoModel.from_pretrained(<temp_location>) # now make the loaded model believe that it is _not_ a PEFT model model_loaded._hf_peft_config_loaded = False # now when we save it, it will save the whole model model_loaded.save_pretrained(<final_location>) # or upload to Hugging Face Hub model_loaded.push_to_hub(<final_location>) ```
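Coming back to the conversion topic itself, the following is a minimal, hypothetical sketch of what writing a PEFT-compatible LoRA checkpoint from another format could look like. The key names, tensor shapes, and config values are illustrative only; adapt them to the adapters you are actually converting.

```python
import json
import os

import torch
from safetensors.torch import save_file

# Hypothetical LoRA weights produced by another framework, keyed by module path.
foreign_state_dict = {
    "encoder.layer.0.attention.self.query.lora_A.weight": torch.zeros(8, 768),
    "encoder.layer.0.attention.self.query.lora_B.weight": torch.zeros(768, 8),
}

# PEFT expects the "base_model.model." prefix and no adapter name in the keys.
peft_state_dict = {f"base_model.model.{k}": v for k, v in foreign_state_dict.items()}

os.makedirs("converted", exist_ok=True)
save_file(peft_state_dict, "converted/adapter_model.safetensors")

# A minimal adapter_config.json; add more entries (r, lora_alpha, base_model_name_or_path, ...) if you know them.
config = {"peft_type": "LORA", "target_modules": ["query", "value"]}
with open("converted/adapter_config.json", "w") as f:
    json.dump(config, f, indent=2)
```

A directory created like this should then be loadable with [`PeftModel.from_pretrained`] like any other PEFT checkpoint.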
peft/docs/source/developer_guides/checkpoint.md/0
{ "file_path": "peft/docs/source/developer_guides/checkpoint.md", "repo_id": "peft", "token_count": 4146 }
199
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Multitask prompt tuning [Multitask prompt tuning](https://huggingface.co/papers/2303.02861) decomposes the soft prompts of each task into a single learned transferable prompt instead of a separate prompt for each task. The single learned prompt can be adapted for each task by multiplicative low rank updates. The abstract from the paper is: *Prompt tuning, in which a base pretrained model is adapted to each task via conditioning on learned prompt vectors, has emerged as a promising approach for efficiently adapting large language models to multiple downstream tasks. However, existing methods typically learn soft prompt vectors from scratch, and it has not been clear how to exploit the rich cross-task knowledge with prompt vectors in a multitask learning setting. We propose multitask prompt tuning (MPT), which first learns a single transferable prompt by distilling knowledge from multiple task-specific source prompts. We then learn multiplicative low rank updates to this shared prompt to efficiently adapt it to each downstream target task. Extensive experiments on 23 NLP datasets demonstrate that our proposed approach outperforms the state-of-the-art methods, including the full finetuning baseline in some cases, despite only tuning 0.035% as many task-specific parameters*. ## MultitaskPromptTuningConfig [[autodoc]] tuners.multitask_prompt_tuning.config.MultitaskPromptTuningConfig ## MultitaskPromptEmbedding [[autodoc]] tuners.multitask_prompt_tuning.model.MultitaskPromptEmbedding
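## Usage example

The configuration follows the usual PEFT pattern of creating a config and passing it to [`get_peft_model`]. The snippet below is only a minimal sketch: the base model and hyperparameters are illustrative, and during training each batch additionally needs to provide `task_ids` so that the shared prompt can be adapted per task.

```py
from transformers import AutoModelForSeq2SeqLM

from peft import MultitaskPromptTuningConfig, TaskType, get_peft_model

config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    num_virtual_tokens=50,
    num_tasks=2,
)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = get_peft_model(model, config)
model.print_trainable_parameters()
```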
peft/docs/source/package_reference/multitask_prompt_tuning.md/0
{ "file_path": "peft/docs/source/package_reference/multitask_prompt_tuning.md", "repo_id": "peft", "token_count": 533 }
200
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quicktour PEFT offers parameter-efficient methods for finetuning large pretrained models. The traditional paradigm is to finetune all of a model's parameters for each downstream task, but this is becoming exceedingly costly and impractical because of the enormous number of parameters in models today. Instead, it is more efficient to train a smaller number of prompt parameters or use a reparametrization method like low-rank adaptation (LoRA) to reduce the number of trainable parameters. This quicktour will show you PEFT's main features and how you can train or run inference on large models that would typically be inaccessible on consumer devices. ## Train Each PEFT method is defined by a [`PeftConfig`] class that stores all the important parameters for building a [`PeftModel`]. For example, to train with LoRA, load and create a [`LoraConfig`] class and specify the following parameters: - `task_type`: the task to train for (sequence-to-sequence language modeling in this case) - `inference_mode`: whether you're using the model for inference or not - `r`: the dimension of the low-rank matrices - `lora_alpha`: the scaling factor for the low-rank matrices - `lora_dropout`: the dropout probability of the LoRA layers ```python from peft import LoraConfig, TaskType peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1) ``` <Tip> See the [`LoraConfig`] reference for more details about other parameters you can adjust, such as the modules to target or the bias type. </Tip> Once the [`LoraConfig`] is setup, create a [`PeftModel`] with the [`get_peft_model`] function. It takes a base model - which you can load from the Transformers library - and the [`LoraConfig`] containing the parameters for how to configure a model for training with LoRA. Load the base model you want to finetune. ```python from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large") ``` Wrap the base model and `peft_config` with the [`get_peft_model`] function to create a [`PeftModel`]. To get a sense of the number of trainable parameters in your model, use the [`print_trainable_parameters`] method. ```python from peft import get_peft_model model = get_peft_model(model, peft_config) model.print_trainable_parameters() "output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282" ``` Out of [bigscience/mt0-large's](https://huggingface.co/bigscience/mt0-large) 1.2B parameters, you're only training 0.19% of them! That is it 🎉! Now you can train the model with the Transformers [`~transformers.Trainer`], Accelerate, or any custom PyTorch training loop. 
For example, to train with the [`~transformers.Trainer`] class, setup a [`~transformers.TrainingArguments`] class with some training hyperparameters. ```py training_args = TrainingArguments( output_dir="your-name/bigscience/mt0-large-lora", learning_rate=1e-3, per_device_train_batch_size=32, per_device_eval_batch_size=32, num_train_epochs=2, weight_decay=0.01, eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, ) ``` Pass the model, training arguments, dataset, tokenizer, and any other necessary component to the [`~transformers.Trainer`], and call [`~transformers.Trainer.train`] to start training. ```py trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["test"], processing_class=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) trainer.train() ``` ### Save model After your model is finished training, you can save your model to a directory using the [`~transformers.PreTrainedModel.save_pretrained`] function. ```py model.save_pretrained("output_dir") ``` You can also save your model to the Hub (make sure you're logged in to your Hugging Face account first) with the [`~transformers.PreTrainedModel.push_to_hub`] function. ```python from huggingface_hub import notebook_login notebook_login() model.push_to_hub("your-name/bigscience/mt0-large-lora") ``` Both methods only save the extra PEFT weights that were trained, meaning it is super efficient to store, transfer, and load. For example, this [facebook/opt-350m](https://huggingface.co/ybelkada/opt-350m-lora) model trained with LoRA only contains two files: `adapter_config.json` and `adapter_model.safetensors`. The `adapter_model.safetensors` file is just 6.3MB! <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">The adapter weights for a opt-350m model stored on the Hub are only ~6MB compared to the full size of the model weights, which can be ~700MB.</figcaption> </div> ## Inference <Tip> Take a look at the [AutoPeftModel](package_reference/auto_class) API reference for a complete list of available `AutoPeftModel` classes. </Tip> Easily load any PEFT-trained model for inference with the [`AutoPeftModel`] class and the [`~transformers.PreTrainedModel.from_pretrained`] method: ```py from peft import AutoPeftModelForCausalLM from transformers import AutoTokenizer import torch model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model = model.to("cuda") model.eval() inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt") outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]) "Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla." ``` For other tasks that aren't explicitly supported with an `AutoPeftModelFor` class - such as automatic speech recognition - you can still use the base [`AutoPeftModel`] class to load a model for the task. 
```py from peft import AutoPeftModel model = AutoPeftModel.from_pretrained("smangrul/openai-whisper-large-v2-LORA-colab") ``` ## Next steps Now that you've seen how to train a model with one of the PEFT methods, we encourage you to try out some of the other methods like prompt tuning. The steps are very similar to the ones shown in the quicktour: 1. prepare a [`PeftConfig`] for a PEFT method 2. use the [`get_peft_model`] method to create a [`PeftModel`] from the configuration and base model Then you can train it however you like! To load a PEFT model for inference, you can use the [`AutoPeftModel`] class. Feel free to also take a look at the task guides if you're interested in training a model with another PEFT method for a specific task such as semantic segmentation, multilingual automatic speech recognition, DreamBooth, token classification, and more.
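To make those two steps concrete, here is a minimal sketch of what they look like for prompt tuning; the base model and the number of virtual tokens are illustrative choices, not recommendations.

```py
from transformers import AutoModelForCausalLM

from peft import PromptTuningConfig, TaskType, get_peft_model

peft_config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=8)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```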
peft/docs/source/quicktour.md/0
{ "file_path": "peft/docs/source/quicktour.md", "repo_id": "peft", "token_count": 2385 }
201
import argparse import os from typing import Optional from huggingface_hub import HfFolder, whoami from transformers import PretrainedConfig def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesModelWithTransformation, ) return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--controlnet_model_name_or_path", type=str, default=None, help="Path to pretrained controlnet model or model identifier from huggingface.co/models." " If not specified controlnet weights are initialized from unet.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help=( "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be" " float32 precision." ), ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--output_dir", type=str, default="controlnet-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. " "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference." 
"Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components." "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step" "instructions." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="wandb", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--wandb_run_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image." ) parser.add_argument( "--conditioning_image_column", type=str, default="conditioning_image", help="The column of the dataset containing the controlnet conditioning image.", ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. 
Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--validation_prompt", type=str, default=None, nargs="+", help=( "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`." " Provide either a matching number of `--validation_image`s, a single `--validation_image`" " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s." ), ) parser.add_argument( "--validation_image", type=str, default=None, nargs="+", help=( "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`" " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a" " a single `--validation_prompt` to be used with all `--validation_image`s, or a single" " `--validation_image` that will be used with all `--validation_prompt`s." ), ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument( "--tracker_project_name", type=str, default="train_controlnet", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) # evaluation arguments parser.add_argument("--controlnet_path", type=str, default=None, help="Path to pretrained controlnet.") parser.add_argument("--unet_path", type=str, default=None, help="Path to pretrained unet.") parser.add_argument("--adapter_name", type=str, default=None, help="Name of the adapter to use.") parser.add_argument("--vis_overlays", action="store_true", help="Whether to visualize the landmarks.") # self-invented arguments parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--name", type=str, help=("The name of the current experiment run, consists of [data]-[prompt]"), ) # BOFT args parser.add_argument("--use_boft", action="store_true", help="Whether to use BOFT for parameter efficient tuning") parser.add_argument("--boft_block_num", type=int, default=8, help="The number of BOFT blocks") parser.add_argument("--boft_block_size", type=int, default=0, help="The size of BOFT blocks") parser.add_argument("--boft_n_butterfly_factor", type=int, default=0, help="The number of butterfly factors") parser.add_argument("--boft_dropout", type=float, default=0.1, help="BOFT dropout, only used if use_boft is True") parser.add_argument( "--boft_bias", type=str, default="none", help="Bias type for BOFT. 
Can be 'none', 'all' or 'boft_only', only used if use_boft is True", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Specify either `--dataset_name` or `--train_data_dir`") if args.dataset_name is not None and args.train_data_dir is not None: raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") if args.validation_prompt is not None and args.validation_image is None: raise ValueError("`--validation_image` must be set if `--validation_prompt` is set") if args.validation_prompt is None and args.validation_image is not None: raise ValueError("`--validation_prompt` must be set if `--validation_image` is set") if ( args.validation_image is not None and args.validation_prompt is not None and len(args.validation_image) != 1 and len(args.validation_prompt) != 1 and len(args.validation_image) != len(args.validation_prompt) ): raise ValueError( "Must provide either 1 `--validation_image`, 1 `--validation_prompt`," " or the same number of `--validation_prompt`s and `--validation_image`s" ) if args.resolution % 8 != 0: raise ValueError( "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the controlnet encoder." ) return args
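# Example invocation of a training script that consumes these arguments. The script name,
# model, dataset, and BOFT hyperparameters below are illustrative only:
#   python train_controlnet.py \
#     --pretrained_model_name_or_path runwayml/stable-diffusion-v1-5 \
#     --dataset_name fusing/fill50k \
#     --resolution 512 \
#     --use_boft --boft_block_num 8 --boft_dropout 0.1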
peft/examples/boft_controlnet/utils/args_loader.py/0
{ "file_path": "peft/examples/boft_controlnet/utils/args_loader.py", "repo_id": "peft", "token_count": 7255 }
202
import os import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup from peft import LoraConfig, TaskType, get_peft_model from peft.utils.other import fsdp_auto_wrap_policy def main(): accelerator = Accelerator() model_name_or_path = "t5-base" batch_size = 8 text_column = "sentence" label_column = "label" max_length = 64 lr = 1e-3 num_epochs = 1 base_path = "temp/data/FinancialPhraseBank-v1.0" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) accelerator.print(model.print_trainable_parameters()) dataset = load_dataset( "json", data_files={ "train": os.path.join(base_path, "financial_phrase_bank_train.jsonl"), "validation": os.path.join(base_path, "financial_phrase_bank_val.jsonl"), }, ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer( inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt" ) labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs with accelerator.main_process_first(): processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) if getattr(accelerator.state, "fsdp_plugin", None) is not None: accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model) model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, optimizer, lr_scheduler ) accelerator.print(model) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() preds = accelerator.gather_for_metrics(torch.argmax(outputs.logits, -1)).detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) accelerator.print(f"{epoch=}: 
{train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["validation"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 accelerator.print(f"{accuracy=}") accelerator.print(f"{eval_preds[:10]=}") accelerator.print(f"{dataset['validation'][label_column][:10]=}") accelerator.wait_for_everyone() # Option1: Pushing the model to Hugging Face Hub # model.push_to_hub( # f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), # token = "hf_..." # ) # token (`bool` or `str`, *optional*): # `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated # when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` # is not specified. # Or you can get your token from https://huggingface.co/settings/token # Option2: Saving the model locally peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_") model.save_pretrained(peft_model_id) accelerator.wait_for_everyone() if __name__ == "__main__": main()
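# This script is intended to be launched through Accelerate with an FSDP-enabled config, e.g. (illustrative):
#   accelerate launch --config_file fsdp_config.yaml peft_lora_seq2seq_accelerate_fsdp.py
# where fsdp_config.yaml was generated beforehand with `accelerate config`.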
peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py/0
{ "file_path": "peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py", "repo_id": "peft", "token_count": 2543 }
203
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch import torch.nn as nn from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, ) from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model class Shell(nn.Module): def __init__(self, weight, bias=None): super().__init__() self.weight = nn.Parameter(weight, requires_grad=False) if bias is not None: self.bias = nn.Parameter(bias, requires_grad=False) def unwrap_model(model, sub_module_name=".base_layer"): sub_module_name_list = [k.split(sub_module_name)[0] for k in model.state_dict().keys() if sub_module_name in k] sub_module_name_set = set(sub_module_name_list) for name in sub_module_name_set: # get the parent of the submodule name_parent = ".".join(name.split(".")[:-1]) name_child = name.split(".")[-1] sub_module = model.get_submodule(name_parent) print(sub_module) # replace with shell child = getattr(sub_module, name_child) weight = getattr(child.base_layer, "weight", None) bias = getattr(child.base_layer, "bias", None) shell = Shell(weight, bias) setattr(sub_module, name_child, shell) print("You have unwrapped the model. Use it on your own risk.") def print_model(model, name): print("=" * 10 + name + "=" * 10) print(model) for name, param in model.named_parameters(): if torch.is_tensor(param): if param.dtype in [torch.float32, torch.float16]: print( name, param.shape, param.device, param.dtype, param.requires_grad, param.mean().item(), param.max().item(), ) else: print(name, param.shape, param.device, param.dtype, param.requires_grad) def arg_parse(): parser = argparse.ArgumentParser(description="Quantize a model with LoftQ.") parser.add_argument( "--model_name_or_path", type=str, default=None, required=True, help="The name or path of the fp32/16 model.", ) parser.add_argument( "--token", type=str, default=None, help="The access token to download model from HuggingFace Hub.", ) parser.add_argument( "--bits", type=int, default=4, help="The quantized bits", ) parser.add_argument( "--iter", type=int, default=1, help="The alternating steps in LoftQ", ) parser.add_argument( "--rank", type=int, default=16, help="The rank of the LoRA adapter", ) parser.add_argument( "--save_dir", type=str, default="./model_zoo/loftq/", help="The rank of the LoRA adapter", ) args = parser.parse_args() return args def quantize_and_save(): args = arg_parse() # Download weights and configure LoRA tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True) if any(name in args.model_name_or_path.lower() for name in ["llama", "mistral", "falcon"]): model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True) task_type = TaskType.CAUSAL_LM target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "down_proj", "gate_proj"] elif any(name in args.model_name_or_path.lower() for name in ["bart", "t5"]): model = 
AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, token=args.token) task_type = TaskType.SEQ_2_SEQ_LM target_modules = ["q_proj", "k_proj", "v_proj", "fc1", "fc2", "out_proj"] elif any(name in args.model_name_or_path.lower() for name in ["deberta", "roberta", "bert"]): model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, token=args.token) task_type = TaskType.SEQ_CLS target_modules = ["query_proj", "key_proj", "value_proj", "dense"] # embeddings not supported by peft else: raise NotImplementedError("Other models not supported yet.") # Config of LoftQ loftq_config = LoftQConfig(loftq_bits=args.bits, loftq_iter=args.iter) lora_config = LoraConfig( task_type=task_type, inference_mode=True, r=args.rank, lora_alpha=16 if task_type is TaskType.CAUSAL_LM else args.rank, lora_dropout=0.1, target_modules=target_modules, init_lora_weights="loftq", loftq_config=loftq_config, ) # Obtain LoftQ model lora_model = get_peft_model(model, lora_config) base_model = lora_model.get_base_model() # Save LoftQ model model_name = args.model_name_or_path.split("/")[-1] + f"-{args.bits}bit" + f"-{args.rank}rank" base_model_dir = os.path.join(args.save_dir, model_name) lora_model_dir = os.path.join(args.save_dir, model_name, "loft_init") # save lora adapters first lora_model.base_model.peft_config[ "default" ].base_model_name_or_path = base_model_dir # This can be a local path or Hub model id lora_model.base_model.peft_config["default"].init_lora_weights = True # Don't apply LoftQ when loading again lora_model.save_pretrained(lora_model_dir) print_model(lora_model, "lora_model") # remove lora adapters and save the backbone unwrap_model(base_model) base_model.save_pretrained(base_model_dir) tokenizer.save_pretrained(base_model_dir) print_model(base_model, "base_model") return base_model_dir, lora_model_dir if __name__ == "__main__": base_dir, lora_dir = quantize_and_save() # example command: # python quantize_save_load.py \ # --model_name_or_path meta-llama/Llama-2-7b-hf \ # --token XXX \ # --bits 4 --iter 5 --rank 16 \ # --save_dir ./model_zoo/loftq/
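# After quantization, the two saved directories can be consumed roughly like this
# (illustrative sketch, not part of this script; requires `from transformers import BitsAndBytesConfig`
# and `from peft import PeftModel`):
#   base = AutoModelForCausalLM.from_pretrained(base_dir, quantization_config=BitsAndBytesConfig(load_in_4bit=True))
#   model = PeftModel.from_pretrained(base, lora_dir, is_trainable=True)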
peft/examples/loftq_finetuning/quantize_save_load.py/0
{ "file_path": "peft/examples/loftq_finetuning/quantize_save_load.py", "repo_id": "peft", "token_count": 2835 }
204
<jupyter_start><jupyter_text>Using PEFT with custom models `peft` allows us to fine-tune models efficiently with LoRA. In this short notebook, we will demonstrate how to train a simple multilayer perceptron (MLP) using `peft`. Imports Make sure that you have the latest version of `peft` installed. To ensure that, run this in your Python environment: python -m pip install --upgrade peft<jupyter_code>import copy import os # ignore bnb warnings os.environ["BITSANDBYTES_NOWELCOME"] = "1" import peft import torch from torch import nn import torch.nn.functional as F torch.manual_seed(0)<jupyter_output><empty_output><jupyter_text>Data We will create a toy dataset consisting of random data for a classification task. There is a little bit of signal in the data, so we should expect that the loss of the model can improve during training.<jupyter_code>X = torch.rand((1000, 20)) y = (X.sum(1) > 10).long() n_train = 800 batch_size = 64 train_dataloader = torch.utils.data.DataLoader( torch.utils.data.TensorDataset(X[:n_train], y[:n_train]), batch_size=batch_size, shuffle=True, ) eval_dataloader = torch.utils.data.DataLoader( torch.utils.data.TensorDataset(X[n_train:], y[n_train:]), batch_size=batch_size, )<jupyter_output><empty_output><jupyter_text>Model As a model, we use a simple multilayer perceptron (MLP). For demonstration purposes, we use a very large number of hidden units. This is totally overkill for this task but it helps to demonstrate the advantages of `peft`. In more realistic settings, models will also be quite large on average, so this is not far-fetched.<jupyter_code>class MLP(nn.Module): def __init__(self, num_units_hidden=2000): super().__init__() self.seq = nn.Sequential( nn.Linear(20, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, 2), nn.LogSoftmax(dim=-1), ) def forward(self, X): return self.seq(X)<jupyter_output><empty_output><jupyter_text>Training Here are just a few training hyper-parameters and a simple function that performs the training and evaluation loop.<jupyter_code>lr = 0.002 batch_size = 64 max_epochs = 30 device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" def train(model, optimizer, criterion, train_dataloader, eval_dataloader, epochs): for epoch in range(epochs): model.train() train_loss = 0 for xb, yb in train_dataloader: xb = xb.to(device) yb = yb.to(device) outputs = model(xb) loss = criterion(outputs, yb) train_loss += loss.detach().float() loss.backward() optimizer.step() optimizer.zero_grad() model.eval() eval_loss = 0 for xb, yb in eval_dataloader: xb = xb.to(device) yb = yb.to(device) with torch.no_grad(): outputs = model(xb) loss = criterion(outputs, yb) eval_loss += loss.detach().float() eval_loss_total = (eval_loss / len(eval_dataloader)).item() train_loss_total = (train_loss / len(train_dataloader)).item() print(f"{epoch=:<2} {train_loss_total=:.4f} {eval_loss_total=:.4f}")<jupyter_output><empty_output><jupyter_text>Training without peft Let's start without using `peft` to see what we can expect from the model training.<jupyter_code>module = MLP().to(device) optimizer = torch.optim.Adam(module.parameters(), lr=lr) criterion = nn.CrossEntropyLoss() %time train(module, optimizer, criterion, train_dataloader, eval_dataloader, epochs=max_epochs)<jupyter_output>epoch=0 train_loss_total=0.7970 eval_loss_total=0.6472 epoch=1 train_loss_total=0.5597 eval_loss_total=0.4898 epoch=2 train_loss_total=0.3696 eval_loss_total=0.3323 epoch=3 
train_loss_total=0.2364 eval_loss_total=0.5454 epoch=4 train_loss_total=0.2428 eval_loss_total=0.2843 epoch=5 train_loss_total=0.1251 eval_loss_total=0.2514 epoch=6 train_loss_total=0.0952 eval_loss_total=0.2068 epoch=7 train_loss_total=0.0831 eval_loss_total=0.2395 epoch=8 train_loss_total=0.0655 eval_loss_total=0.2524 epoch=9 train_loss_total=0.0380 eval_loss_total=0.3650 epoch=10 train_loss_total=0.0363 eval_loss_total=0.3495 epoch=11 train_loss_total=0.0231 eval_loss_total=0.2360 epoch=12 train_loss_total=0.0162 eval_loss_total=0.2276 epoch=13 train_loss_total=0.0094 eval_loss_total=0.2716 epoch=14 train_loss_total=0.0065 eval_loss_total=0.2237 epoch=15 train_loss_total=0.0054 eval_loss_total=0.2366 epoch=16 train_loss_total=0.0035 eval_loss_total=0.2673 epoch=17 trai[...]<jupyter_text>Okay, so we got an eval loss of ~0.26, which is much better than random. Training with peft Now let's train with `peft`. First we check the names of the modules, so that we can configure `peft` to fine-tune the right modules.<jupyter_code>[(n, type(m)) for n, m in MLP().named_modules()]<jupyter_output><empty_output><jupyter_text>Next we can define the LoRA config. There is nothing special going on here. We set the LoRA rank to 8 and select the layers `seq.0` and `seq.2` to be used for LoRA fine-tuning. As for `seq.4`, which is the output layer, we set it as `module_to_save`, which means it is also trained but no LoRA is applied. *Note: Not all layers types can be fine-tuned with LoRA. At the moment, linear layers, embeddings, `Conv2D` and `transformers.pytorch_utils.Conv1D` are supported.<jupyter_code>config = peft.LoraConfig( r=8, target_modules=["seq.0", "seq.2"], modules_to_save=["seq.4"], )<jupyter_output><empty_output><jupyter_text>Now let's create the `peft` model by passing our initial MLP, as well as the config we just defined, to `get_peft_model`.<jupyter_code>module = MLP().to(device) module_copy = copy.deepcopy(module) # we keep a copy of the original model for later peft_model = peft.get_peft_model(module, config) optimizer = torch.optim.Adam(peft_model.parameters(), lr=lr) criterion = nn.CrossEntropyLoss() peft_model.print_trainable_parameters()<jupyter_output><empty_output><jupyter_text>Checking the numbers, we see that only ~1% of parameters are actually trained, which is what we like to see.Now let's start the training:<jupyter_code>%time train(peft_model, optimizer, criterion, train_dataloader, eval_dataloader, epochs=max_epochs)<jupyter_output>epoch=0 train_loss_total=0.6695 eval_loss_total=0.6388 epoch=1 train_loss_total=0.5614 eval_loss_total=0.5456 epoch=2 train_loss_total=0.3897 eval_loss_total=0.3035 epoch=3 train_loss_total=0.2529 eval_loss_total=0.2510 epoch=4 train_loss_total=0.1914 eval_loss_total=0.2191 epoch=5 train_loss_total=0.1236 eval_loss_total=0.2586 epoch=6 train_loss_total=0.1076 eval_loss_total=0.3205 epoch=7 train_loss_total=0.1834 eval_loss_total=0.3951 epoch=8 train_loss_total=0.1037 eval_loss_total=0.1646 epoch=9 train_loss_total=0.0724 eval_loss_total=0.1409 epoch=10 train_loss_total=0.0691 eval_loss_total=0.1725 epoch=11 train_loss_total=0.0641 eval_loss_total=0.1423 epoch=12 train_loss_total=0.0382 eval_loss_total=0.1490 epoch=13 train_loss_total=0.0214 eval_loss_total=0.1517 epoch=14 train_loss_total=0.0119 eval_loss_total=0.1717 epoch=15 train_loss_total=0.0060 eval_loss_total=0.2366 epoch=16 train_loss_total=0.0029 eval_loss_total=0.2069 epoch=17 trai[...]<jupyter_text>In the end, we see that the eval loss is very similar to the one we saw earlier when we 
trained without `peft`. This is quite nice to see, given that we are training a much smaller number of parameters. Check which parameters were updated Finally, just to check that LoRA was applied as expected, we check which original weights were updated and which stayed the same.<jupyter_code>for name, param in peft_model.base_model.named_parameters(): if "lora" not in name: continue print(f"New parameter {name:<13} | {param.numel():>5} parameters | updated") params_before = dict(module_copy.named_parameters()) for name, param in peft_model.base_model.named_parameters(): if "lora" in name: continue name_before = ( name.partition(".")[-1].replace("base_layer.", "").replace("original_", "").replace("module.", "").replace("modules_to_save.default.", "") ) param_before = params_before[name_before] if torch.allclose(param, param_before): print(f"Parameter {name_before:<13} | {param.numel():>7} parameters | not updated") else: print(f"Parameter {name_before:<13} | {param.numel():>7} parameters | updated")<jupyter_output>Parameter seq.0.weight | 40000 parameters | not updated Parameter seq.0.bias | 2000 parameters | not updated Parameter seq.2.weight | 4000000 parameters | not updated Parameter seq.2.bias | 2000 parameters | not updated Parameter seq.4.weight | 4000 parameters | not updated Parameter seq.4.bias | 2 parameters | not updated Parameter seq.4.weight | 4000 parameters | updated Parameter seq.4.bias | 2 parameters | updated<jupyter_text>So we can see that apart from the new LoRA weights that were added, only the last layer was updated. Since the LoRA weights and the last layer have comparatively few parameters, this gives us a big boost in efficiency. Sharing the model through Hugging Face Hub Pushing the model to HF Hub With the `peft` model, it is also very easy to push a model to the Hugging Face Hub. Below, we demonstrate how it works. It is assumed that you have a valid Hugging Face account and are logged in:<jupyter_code>user = "BenjaminB" # put your user name here model_name = "peft-lora-with-custom-model" model_id = f"{user}/{model_name}" peft_model.push_to_hub(model_id);<jupyter_output><empty_output><jupyter_text>As we can see, the adapter size is only 211 kB. Loading the model from HF Hub Now, it only takes one step to load the model from HF Hub. To do this, we can use `PeftModel.from_pretrained`, passing our base model and the model ID:<jupyter_code>loaded = peft.PeftModel.from_pretrained(module_copy, model_id) type(loaded)<jupyter_output><empty_output><jupyter_text>Let's check that the two models produce the same output:<jupyter_code>y_peft = peft_model(X.to(device)) y_loaded = loaded(X.to(device)) torch.allclose(y_peft, y_loaded)<jupyter_output><empty_output><jupyter_text>Clean up Finally, as a cleanup step, you may want to delete the repo.<jupyter_code>from huggingface_hub import delete_repo delete_repo(model_id)<jupyter_output><empty_output>
peft/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb/0
{ "file_path": "peft/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb", "repo_id": "peft", "token_count": 4087 }
205
import os from enum import Enum import packaging.version import torch import transformers from datasets import DatasetDict, load_dataset, load_from_disk from datasets.builder import DatasetGenerationError from transformers import ( AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, ) from peft import LoraConfig DEFAULT_CHATML_CHAT_TEMPLATE = "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}" DEFAULT_ZEPHYR_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" class ZephyrSpecialTokens(str, Enum): user = "<|user|>" assistant = "<|assistant|>" system = "<|system|>" eos_token = "</s>" bos_token = "<s>" pad_token = "<pad>" @classmethod def list(cls): return [c.value for c in cls] class ChatmlSpecialTokens(str, Enum): user = "<|im_start|>user" assistant = "<|im_start|>assistant" system = "<|im_start|>system" eos_token = "<|im_end|>" bos_token = "<s>" pad_token = "<pad>" @classmethod def list(cls): return [c.value for c in cls] def create_datasets(tokenizer, data_args, training_args, apply_chat_template=False): def preprocess(samples): batch = [] for conversation in samples["messages"]: batch.append(tokenizer.apply_chat_template(conversation, tokenize=False)) return {"content": batch} raw_datasets = DatasetDict() for split in data_args.splits.split(","): try: # Try first if dataset on a Hub repo dataset = load_dataset(data_args.dataset_name, split=split) except DatasetGenerationError: # If not, check local dataset dataset = load_from_disk(os.path.join(data_args.dataset_name, split)) if "train" in split: raw_datasets["train"] = dataset elif "test" in split: raw_datasets["test"] = dataset else: raise ValueError(f"Split type {split} not recognized as one of test or train.") if apply_chat_template: raw_datasets = raw_datasets.map( preprocess, batched=True, remove_columns=raw_datasets["train"].column_names, ) train_data = raw_datasets["train"] valid_data = raw_datasets["test"] print(f"Size of the train set: {len(train_data)}. 
Size of the validation set: {len(valid_data)}") print(f"A sample of train dataset: {train_data[0]}") return train_data, valid_data def create_and_prepare_model(args, data_args, training_args): if args.use_unsloth: from unsloth import FastLanguageModel bnb_config = None quant_storage_dtype = None if ( torch.distributed.is_available() and torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1 and args.use_unsloth ): raise NotImplementedError("Unsloth is not supported in distributed training") if args.use_4bit_quantization: compute_dtype = getattr(torch, args.bnb_4bit_compute_dtype) quant_storage_dtype = getattr(torch, args.bnb_4bit_quant_storage_dtype) bnb_config = BitsAndBytesConfig( load_in_4bit=args.use_4bit_quantization, bnb_4bit_quant_type=args.bnb_4bit_quant_type, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=args.use_nested_quant, bnb_4bit_quant_storage=quant_storage_dtype, ) if compute_dtype == torch.float16 and args.use_4bit_quantization: major, _ = torch.cuda.get_device_capability() if major >= 8: print("=" * 80) print("Your GPU supports bfloat16, you can accelerate training with the argument --bf16") print("=" * 80) elif args.use_8bit_quantization: bnb_config = BitsAndBytesConfig(load_in_8bit=args.use_8bit_quantization) if args.use_unsloth: if torch.xpu.is_available(): raise NotImplementedError("XPU hasn't supported unsloth yet") # Load model model, _ = FastLanguageModel.from_pretrained( model_name=args.model_name_or_path, max_seq_length=training_args.max_seq_length, dtype=None, load_in_4bit=args.use_4bit_quantization, ) else: torch_dtype = ( quant_storage_dtype if quant_storage_dtype and quant_storage_dtype.is_floating_point else torch.float32 ) # Prepare model loading arguments model_kwargs = { "trust_remote_code": True, "torch_dtype": torch_dtype, } if args.use_flash_attn: if torch.xpu.is_available(): print("XPU hasn't supported flash_attn yet, use eager implementation instead.") model_kwargs["attn_implementation"] = "eager" else: model_kwargs["attn_implementation"] = "flash_attention_2" # Only add quantization_config if bnb_config is not None if bnb_config is not None: model_kwargs["quantization_config"] = bnb_config model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, **model_kwargs) peft_config = None chat_template = None if args.use_peft_lora and not args.use_unsloth: peft_config = LoraConfig( lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, r=args.lora_r, bias="none", task_type="CAUSAL_LM", target_modules=args.lora_target_modules.split(",") if args.lora_target_modules != "all-linear" else args.lora_target_modules, ) special_tokens = None chat_template = None if args.chat_template_format == "chatml": special_tokens = ChatmlSpecialTokens chat_template = DEFAULT_CHATML_CHAT_TEMPLATE elif args.chat_template_format == "zephyr": special_tokens = ZephyrSpecialTokens chat_template = DEFAULT_ZEPHYR_CHAT_TEMPLATE if special_tokens is not None: tokenizer = AutoTokenizer.from_pretrained( args.model_name_or_path, pad_token=special_tokens.pad_token.value, bos_token=special_tokens.bos_token.value, eos_token=special_tokens.eos_token.value, additional_special_tokens=special_tokens.list(), trust_remote_code=True, ) tokenizer.chat_template = chat_template # make embedding resizing configurable? # Transformers 4.46.0+ defaults uses mean_resizing by default, which fails with QLoRA + FSDP because the # embedding could be on meta device, therefore, we set mean_resizing=False in that case (i.e. the status quo # ante). 
See https://github.com/huggingface/accelerate/issues/1620. uses_transformers_4_46 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.46.0") uses_fsdp = os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true" # Check if the model is quantized is_quantized = (bnb_config is not None) or (getattr(model, "hf_quantizer", None) is not None) if is_quantized and uses_fsdp and uses_transformers_4_46: model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8, mean_resizing=False) else: model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8) else: tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token if args.use_unsloth: # Do model patching and add fast LoRA weights model = FastLanguageModel.get_peft_model( model, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, r=args.lora_r, target_modules=args.lora_target_modules.split(",") if args.lora_target_modules != "all-linear" else args.lora_target_modules, use_gradient_checkpointing=training_args.gradient_checkpointing, random_state=training_args.seed, max_seq_length=training_args.max_seq_length, ) return model, peft_config, tokenizer
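# Illustrative usage from an SFT training script (the argument objects would come from the script's argument parser):
#   model, peft_config, tokenizer = create_and_prepare_model(model_args, data_args, training_args)
#   train_dataset, eval_dataset = create_datasets(tokenizer, data_args, training_args, apply_chat_template=True)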
peft/examples/sft/utils.py/0
{ "file_path": "peft/examples/sft/utils.py", "repo_id": "peft", "token_count": 3912 }
206
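The create_and_prepare_model helper above wires together a BitsAndBytesConfig for 4-/8-bit quantization, optional unsloth or flash-attention loading, an optional LoraConfig, and chat-template tokenizer setup. As a rough, hedged illustration (not part of the repo), the plain 4-bit QLoRA path it takes reduces to something like the sketch below; the model id and hyperparameter values are placeholders standing in for the script's args fields.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig

model_id = "meta-llama/Llama-3.2-3B"  # placeholder for args.model_name_or_path

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_storage=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules="all-linear",
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token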
{ "alpha_pattern": {}, "auto_mapping": null, "base_model_name_or_path": null, "beta1": 0.85, "beta2": 0.85, "bias": "none", "corda_config": null, "deltaT": 1, "eva_config": null, "exclude_modules": null, "fan_in_fan_out": false, "inference_mode": false, "init_lora_weights": true, "init_r": 64, "layer_replication": null, "layers_pattern": null, "layers_to_transform": null, "loftq_config": {}, "lora_alpha": 8, "lora_bias": false, "lora_dropout": 0.0, "megatron_config": null, "megatron_core": "megatron.core", "modules_to_save": null, "orth_reg_weight": 0.5, "peft_type": "ADALORA", "r": 8, "rank_pattern": null, "revision": null, "target_modules": null, "target_r": 32, "task_type": null, "tfinal": 500, "tinit": 200, "total_step": 5000, "use_dora": false, "use_rslora": false }
peft/method_comparison/MetaMathQA/experiments/adalora/llama-3.2-3B-rank32/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/adalora/llama-3.2-3B-rank32/adapter_config.json", "repo_id": "peft", "token_count": 384 }
207
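The JSON above is the serialized form of an AdaLoraConfig. As a sketch (field values copied from the file, everything else left at its default), the equivalent Python object would look roughly like this:

from peft import AdaLoraConfig

adalora_config = AdaLoraConfig(
    init_r=64,            # initial per-matrix rank before pruning
    target_r=32,          # average rank budget after the schedule finishes
    r=8,
    lora_alpha=8,
    lora_dropout=0.0,
    tinit=200,            # warmup steps before rank pruning starts
    tfinal=500,           # final steps spent fine-tuning at the fixed budget
    deltaT=1,             # interval (in steps) between budget re-allocations
    beta1=0.85,
    beta2=0.85,
    orth_reg_weight=0.5,  # weight of the orthogonality regularizer
    total_step=5000,
)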
{ "alpha": 64, "alpha_pattern": {}, "auto_mapping": null, "base_model_name_or_path": null, "decompose_both": false, "decompose_factor": -1, "exclude_modules": null, "inference_mode": false, "init_weights": true, "layers_pattern": null, "layers_to_transform": null, "module_dropout": 0.0, "modules_to_save": null, "peft_type": "LOKR", "r": 32, "rank_dropout": 0.0, "rank_dropout_scale": false, "rank_pattern": {}, "revision": null, "target_modules": [ "q_proj", "v_proj" ], "task_type": null, "use_effective_conv2d": false }
peft/method_comparison/MetaMathQA/experiments/lokr/llama-3.2-3B-rank32/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/lokr/llama-3.2-3B-rank32/adapter_config.json", "repo_id": "peft", "token_count": 254 }
208
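Similarly, this file deserializes to a LoKrConfig, which is applied with get_peft_model. A minimal sketch, assuming base_model is an already-loaded transformer (not shown in this entry):

from peft import LoKrConfig, get_peft_model

lokr_config = LoKrConfig(
    r=32,
    alpha=64,
    target_modules=["q_proj", "v_proj"],
    rank_dropout=0.0,
    module_dropout=0.0,
    decompose_both=False,
    decompose_factor=-1,
    use_effective_conv2d=False,
)
# peft_model = get_peft_model(base_model, lokr_config)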
import pandas as pd
import pytest

from .sanitizer import parse_and_filter


@pytest.fixture
def df_products():
    data = {
        'product_id': [101, 102, 103, 104, 105, 106],
        'category': ['Electronics', 'Books', 'Electronics', 'Home Goods', 'Books', 'Electronics'],
        'price': [799.99, 19.99, 49.50, 120.00, 24.99, 150.00],
        'stock': [15, 300, 50, 25, 150, 0]
    }
    return pd.DataFrame(data)


def test_exploit_fails(df_products):
    with pytest.raises(ValueError) as e:
        mask1 = parse_and_filter(df_products, """price < 50 and @os.system("/bin/echo password")""")
    assert 'Invalid filter syntax' in str(e)


@pytest.mark.parametrize('expression,ids', [
    ("price < 50", [102, 103, 105]),
    ("product_id in [101, 102]", [101, 102]),
    ("price < 50 and category == 'Electronics'", [103]),
    ("stock < 100 or category == 'Home Goods'", [101, 103, 104, 106]),
    ("(price > 100 and stock < 20) or category == 'Books'", [101, 102, 105, 106]),
    ("not (price > 50 or stock > 100)", [103]),
    ("not price > 50", [102, 103, 105]),
    ("(price < 50) & (category == 'Electronics')", [103]),
    ("(stock < 100) | (category == 'Home Goods')", [101, 103, 104, 106]),
])
def test_operations(df_products, expression, ids):
    mask1 = parse_and_filter(df_products, expression)
    assert sorted(df_products[mask1].product_id) == sorted(ids)
peft/method_comparison/test_sanitizer.py/0
{ "file_path": "peft/method_comparison/test_sanitizer.py", "repo_id": "peft", "token_count": 554 }
209
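The tests above pin down the contract of sanitizer.parse_and_filter: comparison, membership, boolean and bitwise operators over DataFrame columns are allowed, and anything else (such as the @os.system injection attempt) must raise a ValueError mentioning 'Invalid filter syntax'. The sanitizer itself is not included in this entry; a hypothetical implementation satisfying that contract could validate the expression with the ast module before delegating evaluation to pandas, for example:

import ast

import pandas as pd

# Whitelist of AST node types a filter expression may contain (hypothetical sketch,
# not the repo's actual sanitizer.py).
_ALLOWED_NODES = (
    ast.Expression, ast.BoolOp, ast.UnaryOp, ast.BinOp, ast.Compare,
    ast.Name, ast.Load, ast.Constant, ast.List, ast.Tuple,
    ast.And, ast.Or, ast.Not, ast.BitAnd, ast.BitOr,
    ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE, ast.In, ast.NotIn,
)


def parse_and_filter(df: pd.DataFrame, expression: str) -> pd.Series:
    """Return a boolean mask for df, rejecting anything but simple column filters."""
    try:
        tree = ast.parse(expression, mode="eval")
    except SyntaxError as exc:
        raise ValueError(f"Invalid filter syntax: {exc}") from exc
    for node in ast.walk(tree):
        if not isinstance(node, _ALLOWED_NODES):
            raise ValueError(f"Invalid filter syntax: disallowed element {type(node).__name__}")
        if isinstance(node, ast.Name) and node.id not in df.columns:
            raise ValueError(f"Invalid filter syntax: unknown column {node.id!r}")
    # pandas' own expression engine handles comparisons, in/not in, and/or/not and &/|.
    return df.eval(expression)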
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a minimal example of launching PEFT with Accelerate. This used to cause issues because PEFT would eagerly
# import bitsandbytes, which initializes CUDA, resulting in:
# > RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the
# > 'spawn' start method
# This script exists to ensure that this issue does not reoccur.

import torch
from accelerate import notebook_launcher

import peft
from peft.utils import infer_device


def init():
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(1, 2)

        def forward(self, x):
            return self.linear(x)

    device = infer_device()
    model = MyModule().to(device)
    peft.get_peft_model(model, peft.LoraConfig(target_modules=["linear"]))


def main():
    notebook_launcher(init, (), num_processes=2)


if __name__ == "__main__":
    main()
peft/scripts/launch_notebook_mp.py/0
{ "file_path": "peft/scripts/launch_notebook_mp.py", "repo_id": "peft", "token_count": 493 }
210
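The launcher script above is purely a regression test: it fails if anything in PEFT initializes CUDA at import time, because notebook_launcher forks its worker processes. The error message it quotes also points at the generic workaround of using the 'spawn' start method; a tiny sketch of that alternative (an illustration only, not something the repo uses) looks like:

import torch
import torch.multiprocessing as mp


def worker(rank):
    # 'spawn' starts a fresh interpreter per worker, so CUDA can be initialized here
    # even if the parent process already touched it.
    print(rank, torch.cuda.is_available())


if __name__ == "__main__":
    mp.spawn(worker, nprocs=2)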
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import collections import copy import inspect import os import warnings from contextlib import contextmanager, nullcontext from copy import deepcopy from dataclasses import dataclass from typing import Any, Literal, Optional, Union import packaging.version import torch import transformers from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory, named_module_tensors from huggingface_hub import HfFileSystem, ModelCard, ModelCardData, hf_hub_download from safetensors import safe_open from safetensors.torch import save_file as safe_save_file from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import Cache, DynamicCache, EncoderDecoderCache, PreTrainedModel from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer from peft.utils.constants import DUMMY_MODEL_CONFIG from peft.utils.integrations import init_empty_weights from peft.utils.other import create_attention_mask, set_additional_trainable_modules from . import __version__ from .config import PeftConfig from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING, PEFT_TYPE_TO_PREFIX_MAPPING, PEFT_TYPE_TO_TUNER_MAPPING from .utils import ( SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftType, TaskType, _get_batch_size, _prepare_prompt_learning_config, _set_adapter, _set_trainable, get_peft_model_state_dict, id_tensor_storage, infer_device, load_peft_weights, map_cache_to_layer_device_map, set_peft_model_state_dict, shift_tokens_right, ) class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading loading process. <Tip> Don't use `low_cpu_mem_usage=True` when creating a new PEFT adapter for training. </Tip> **Attributes**: - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. 
- **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__( self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", autocast_adapter_dtype: bool = True, low_cpu_mem_usage: bool = False, ) -> None: super().__init__() self.active_adapter = adapter_name self.peft_type = peft_config.peft_type # These args are special PEFT arguments that users can pass. They need to be removed before passing them to # forward. self.special_peft_forward_args = {"adapter_names"} self._is_prompt_learning = peft_config.is_prompt_learning if self._is_prompt_learning: self._peft_config = {adapter_name: peft_config} self.base_model = model self.add_adapter(adapter_name, peft_config, low_cpu_mem_usage=low_cpu_mem_usage) else: self._peft_config = None cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] ctx = init_empty_weights if low_cpu_mem_usage else nullcontext with ctx(): self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) if hasattr(self.base_model, "_cast_adapter_dtype"): self.base_model._cast_adapter_dtype( adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype ) if getattr(model, "is_gradient_checkpointing", True): model = self.prepare_model_for_gradient_checkpointing(model) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> dict[str, PeftConfig]: if self._is_prompt_learning: return self._peft_config return self.base_model.peft_config @property def active_adapters(self) -> list[str]: try: adapters = self.base_model.active_adapters if not isinstance(adapters, list): # Base model is probably a transformers model, see: # https://github.com/huggingface/transformers/pull/30790#issuecomment-2253808249 # Unfortunately, transformers models also have an active_adapters method but it's 1) not a property and # 2) calling it fails because the base model (usually) has no loaded adapter. The base model can be a # transformers model for prompt learning, where the base model is not wrapped in a LoraModel or similar. 
adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] except AttributeError: adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] return adapters @peft_config.setter def peft_config(self, value: dict[str, PeftConfig]): if self._is_prompt_learning: self._peft_config = value else: self.base_model.peft_config = value def save_pretrained( self, save_directory: str, safe_serialization: bool = True, selected_adapters: Optional[list[str]] = None, save_embedding_layers: Union[str, bool] = "auto", is_main_process: bool = True, path_initial_model_for_weight_conversion: Optional[str] = None, **kwargs: Any, ) -> None: r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). safe_serialization (`bool`, *optional*): Whether to save the adapter files in safetensors format, defaults to `True`. selected_adapters (`List[str]`, *optional*): A list of adapters to be saved. If `None`, will default to all adapters. save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. and automatically sets the boolean flag. This only works for 🤗 transformers models. is_main_process (`bool`, *optional*): Whether the process calling this is the main process or not. Will default to `True`. Will not save the checkpoint if not on the main process, which is important for multi device setups (e.g. DDP). path_initial_model_for_weight_conversion (`str, *optional*`): The path to the initialized adapter, which is obtained after initializing the model with PiSSA/CorDA/OLoRA and before performing any training. When `path_initial_model_for_weight_conversion` is not None, the difference in adapter before and after fine-tuning is calculated. This difference can be represented as the parameters of a standard LoRA adapter. Using this converted adapter does not require changes to the base model, thus conveniently allowing the use of multiple PiSSA/CorDA/OLoRA adapters with LoRA adapters, and the activation or deactivation of any adapters. Note that this conversion is not supported if `rslora` is used in combination with `rank_pattern` or `alpha_pattern`. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. """ if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if selected_adapters is None: selected_adapters = list(self.peft_config.keys()) else: if any( selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters ): raise ValueError( f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" f" {list(self.peft_config.keys())} - got {selected_adapters}." 
) def save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs): if peft_config.use_rslora and (peft_config.rank_pattern or peft_config.alpha_pattern): msg = ( "Passing `path_initial_model_for_weight_conversion` to `save_pretrained` is not supported when " "using `rank_pattern` or `alpha_pattern` at the same time as `use_rslora=True`." ) raise ValueError(msg) if not any( str(peft_config.init_lora_weights).lower().startswith(prefix) for prefix in ["pissa", "corda", "olora", "true"] ): warnings.warn( "`path_initial_model_for_weight_conversion` only works for converting a PiSSA/CorDA/OLoRA adapter to " "a LoRA adapter" ) initial_adapter_name = os.path.basename(path_initial_model_for_weight_conversion) try: self.load_adapter( os.path.dirname(path_initial_model_for_weight_conversion), subfolder=initial_adapter_name, adapter_name=initial_adapter_name, ) is_pissa = str(self.peft_config[initial_adapter_name].init_lora_weights).lower().startswith("pissa") is_corda = str(self.peft_config[initial_adapter_name].init_lora_weights).lower() == "corda" is_olora = str(self.peft_config[initial_adapter_name].init_lora_weights).lower() == "olora" if is_pissa or is_corda or is_olora: raise ValueError( "The `init_lora_weights` parameter of the initial adapter should be set to `True`. " "Otherwise, `self.load_adapter` will subtract the decomposed values again based on the " "residual model." ) output_state_dict = self.base_model.subtract_mutated_init( output_state_dict, initial_adapter_name, kwargs ) finally: self.delete_adapter(initial_adapter_name) return output_state_dict if is_main_process: os.makedirs(save_directory, exist_ok=True) self.create_or_update_model_card(save_directory) for adapter_name in selected_adapters: peft_config = self.peft_config[adapter_name] # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name, save_embedding_layers=save_embedding_layers, ) output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) if is_main_process and safe_serialization: # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 # Safetensors does not allow tensor aliasing. # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in output_state_dict.items(): # Sometimes in the state_dict we have non-tensor objects. # e.g. in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} for _, names in shared_ptrs.items(): # Here we just clone the shared tensors to avoid tensor aliasing which is # not supported in safetensors. 
for shared_tensor_name in names[1:]: output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() if path_initial_model_for_weight_conversion is not None: peft_config = copy.deepcopy(peft_config) peft_config.init_lora_weights = True peft_config.save_pretrained(path_initial_model_for_weight_conversion) output_state_dict = save_mutated_as_lora( peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs ) safe_save_file( output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}, ) elif is_main_process: if path_initial_model_for_weight_conversion is not None: peft_config = copy.deepcopy(peft_config) peft_config.init_lora_weights = True peft_config.save_pretrained(path_initial_model_for_weight_conversion) output_state_dict = save_mutated_as_lora( peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs ) torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True if peft_config.task_type is None: # deal with auto mapping base_model_class = self._get_base_model_class( is_prompt_tuning=peft_config.is_prompt_learning, ) parent_library = base_model_class.__module__ auto_mapping_dict = { "base_model_class": base_model_class.__name__, "parent_library": parent_library, } else: auto_mapping_dict = None if is_main_process: if path_initial_model_for_weight_conversion is not None: peft_config.init_lora_weights = True peft_config.r *= 2 if not peft_config.use_rslora: peft_config.lora_alpha *= 2 else: # with rslora, we have scaling = alpha / sqrt(r), we thus adjust alpha to keep the same scaling peft_config.lora_alpha *= 2**0.5 if peft_config.rank_pattern: peft_config.rank_pattern = {key: 2 * val for key, val in peft_config.rank_pattern.items()} if peft_config.alpha_pattern: peft_config.alpha_pattern = {key: 2 * val for key, val in peft_config.alpha_pattern.items()} peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) peft_config.inference_mode = inference_mode @classmethod def from_pretrained( cls, model: torch.nn.Module, model_id: Union[str, os.PathLike], adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, autocast_adapter_dtype: bool = True, ephemeral_gpu_offload: bool = False, low_cpu_mem_usage: bool = False, key_mapping: Optional[dict[str, str]] = None, **kwargs: Any, ) -> PeftModel: r""" Instantiate a PEFT model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. Args: model ([`torch.nn.Module`]): The model to be adapted. For 🤗 Transformers models, the model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`]. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. 
This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuration. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Only relevant for specific adapter types. ephemeral_gpu_offload (`bool`, *optional*): Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`. This is useful when parts of the model and/or components (such as adapters) are kept in CPU memory until they are needed. Rather than perform expensive operations on small data, the data is transferred to the GPU on-demand, the operation(s) performed, and the results moved back to CPU memory. This brings a slight momentary VRAM overhead but gives orders of magnitude speedup in certain cases. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device before loading the saved weights. Useful to speed up the process. torch_device (`str`, *optional*, defaults to None): The device to load the adapter on. If `None`, the device will be inferred. key_mapping (dict, *optional*, defaults to None) Extra mapping of PEFT `state_dict` keys applied before loading the `state_dict`. When this mapping is applied, the PEFT-specific `"base_model.model"` prefix is removed beforehand and the adapter name (e.g. `"default"`) is not inserted yet. Only pass this argument if you know what you're doing. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. """ from .auto import MODEL_TYPE_TO_PEFT_MODEL_MAPPING from .tuners import XLoraConfig, XLoraModel # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") # See discussion in https://github.com/huggingface/transformers/pull/38627 # Some transformers models can have a _checkpoint_conversion_mapping dict that is used to map state_dicts # stemming from updated model architectures so that they still correspond to the initial architecture. When # loading a PEFT state_dict created with the initial architecture on a model with the new architecture, we need # to map it too according to the same rules. Note that we skip prompt learning methods. This is because they # don't have the "base_model.model." prefix, which we need to remove before mapping. Instead just using # "base_model.". This could be fine, we could only remove "base_model.", However, the subsequent sub-module # could also be called "model", resulting in what looks like "base_model.model.". To avoid this confusion, we # skip prompt learning. Since it applies itself directly to the pre-trained model (unlike LoRA et al that target # sub-modules), skipping should be fine. 
if (key_mapping is None) and (not config.is_prompt_learning): key_mapping = getattr(model, "_checkpoint_conversion_mapping", {}) # Runtime configuration, if supported if hasattr(config, "runtime_config"): config.runtime_config.ephemeral_gpu_offload = ephemeral_gpu_offload else: if ephemeral_gpu_offload: warnings.warn("Ephemeral GPU offloading is not supported for this model. Ignoring.") if hasattr(model, "hf_device_map"): weight_map = dict(named_module_tensors(model, recurse=True)) # recreate the offload_index for disk-offloaded modules: we need to know the location in storage of each weight # before the offload hook is removed from the model disk_modules = set() index = None for name, module in model.named_modules(): if hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "original_devices"): if hasattr(module._hf_hook.weights_map, "dataset"): index = module._hf_hook.weights_map.dataset.index for key in module._hf_hook.original_devices.keys(): if module._hf_hook.original_devices[key] == torch.device("meta"): disk_modules.add(str(name) + "." + str(key)) if disk_modules and not kwargs.get("use_safetensors", True): raise ValueError("Disk offloading currently only supported for safetensors") if index: offload_index = { p: { "safetensors_file": index[p]["safetensors_file"], "weight_name": p, "dtype": str(weight_map[p].dtype).replace("torch.", ""), } for p in weight_map.keys() if p in disk_modules } kwargs["offload_index"] = offload_index if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if isinstance(getattr(model, "base_model", None), XLoraModel): if not isinstance(config, XLoraConfig): raise TypeError(f"Expected 'XLoraConfig', got '{type(config)}' instead.") if "adapters" in kwargs: config.adapters = kwargs["adapters"] else: # If the path is on HF hub, then we get the adapter names to create a subfolders list which tells # `load_adapter` where the adapters are. if not os.path.exists(model_id): s = HfFileSystem() # The names of the adapters which must be in folders adapter_names = [ file["name"][len(model_id) + 1 :] for file in s.ls(model_id) if file["type"] == "directory" ] # Prepare a dict of adapter paths, which really just point to the hf id; we will use the subfolders adapter_paths = {} for adapter_name in adapter_names: adapter_paths[adapter_name] = os.path.join(model_id, model_id) config.adapters = adapter_paths config._subfolders = adapter_names else: if "adapters" not in kwargs: raise ValueError("If model_id is a local path, then `adapters` must be passed in kwargs.") if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls( model, config, adapter_name, autocast_adapter_dtype=autocast_adapter_dtype, low_cpu_mem_usage=low_cpu_mem_usage, ) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type]( model, config, adapter_name, autocast_adapter_dtype=autocast_adapter_dtype, low_cpu_mem_usage=low_cpu_mem_usage, ) load_result = model.load_adapter( model_id, adapter_name, is_trainable=is_trainable, autocast_adapter_dtype=autocast_adapter_dtype, low_cpu_mem_usage=low_cpu_mem_usage, key_mapping=key_mapping, **kwargs, ) # 1. Remove VB-LoRA vector bank, since it's a shared parameter set via the VBLoRAModel # 2. 
Remove the prompt encoder, as it does not need to be part of the checkpoint missing_keys = [ k for k in load_result.missing_keys if "vblora_vector_bank" not in k and "prompt_encoder" not in k ] if missing_keys: # Let's warn here since (in contrast to load_adapter) we don't return the load result, so it could be quite # difficult for users to even notice that something might have gone wrong here. As we filter out non PEFT # keys from the missing keys, this gives no false positives. # careful: if the wording of the warning is changed, adjust the unit tests accordingly! warn_message = f"Found missing adapter keys while loading the checkpoint: {missing_keys}." prefix = PEFT_TYPE_TO_PREFIX_MAPPING.get(config.peft_type) if prefix and adapter_name in prefix: warn_message += ( f"Adapter name {adapter_name} should not be contained in the prefix {prefix}." "This could be the potential reason for missing adapter keys." ) warnings.warn(warn_message) return model def _setup_prompt_encoder(self, adapter_name: str): config = self.peft_config[adapter_name] if not hasattr(self, "prompt_encoder"): self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if transformer_backbone is None: transformer_backbone = self.base_model if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 # determine the word embeddings word_embeddings = None try: # First try to find the word embeddings based on the module name, this should work for models like Bert, # Roberta, Deberta, etc. word_embeddings = self.base_model.get_submodule("embeddings.word_embeddings") except AttributeError: pass if word_embeddings is None: # Word embeddings could not be determined. Next try to guess them by checking which parameter has the size # of the vocab. 
for named_param, value in list(transformer_backbone.named_parameters()): # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape # [0] the actual unsharded shape is stored in "ds_shape" attribute special handling is needed in case # the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig has been called before # For reference refer to issue: https://github.com/huggingface/peft/issues/996 deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) # Handle VLM case with separate text and vision configs if hasattr(self.base_model.config, "get_text_config"): vocab_size = self.base_model.config.get_text_config().vocab_size # below: for older transformers versions before get_text_config was added elif "text_config" in self.base_model.config: vocab_size = self.base_model.config.text_config.vocab_size else: vocab_size = self.base_model.config.vocab_size if value.shape[0] == vocab_size or ( deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == vocab_size ): word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break self.word_embeddings = word_embeddings model_cls = PEFT_TYPE_TO_TUNER_MAPPING[config.peft_type] if config.peft_type in (PeftType.PROMPT_TUNING, PeftType.MULTITASK_PROMPT_TUNING, PeftType.CPT): prompt_encoder = model_cls(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = model_cls(config) elif config.peft_type == PeftType.PREFIX_TUNING: # prefix tuning now uses Cache but that won't work with gradient checkpointing if any(getattr(module, "gradient_checkpointing", False) for module in self.get_base_model().modules()): raise ValueError("Prefix tuning does not work with gradient checkpointing.") prompt_encoder = model_cls(config) else: raise ValueError("Not supported") prompt_encoder = prompt_encoder.to(self.device) self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): r""" Prepares the model for gradient checkpointing if necessary """ self._prepare_model_for_gradient_checkpointing(model) def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: """ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning method. 
""" prompt_encoder = self.prompt_encoder[adapter_name] prompt_tokens = ( self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) ) peft_type = self.peft_config[adapter_name].peft_type if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_embedding_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_type] prompt_embeddings = super(prompt_embedding_cls, prompt_encoder).forward(prompt_tokens) else: prompt_embeddings = prompt_encoder(prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt( self, batch_size: int, task_ids: Optional[torch.Tensor] = None, max_cache_len: Optional[int] = None ) -> torch.Tensor: """ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. """ peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = ( self.prompt_tokens[self.active_adapter] .unsqueeze(0) .expand(batch_size, -1) .to(prompt_encoder.embedding.weight.device) ) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) if self.base_model_torch_dtype is not None: past_key_values = past_key_values.to(self.base_model_torch_dtype) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) # Transpose: 2 x [num_layers, batch_size, num_heads, num_virtual_tokens, head_dim] past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) base_model = self.get_base_model() model_config = getattr(base_model, "config", None) model_type = getattr(model_config, "model_type", "") if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) elif ("gemma2" in model_type) or ("gemma3_text" in model_type): # TODO: remove this logic once transformers < 4.56 is dropped transformers_lt_4_56 = packaging.version.parse(transformers.__version__) < packaging.version.parse( "4.56.0.dev0" ) # Gemma2 and Gemma3 only support HybridCache (which does not have the from_legacy_cache method) if transformers_lt_4_56 and ((max_cache_len is None) or (max_cache_len == -1)): raise ValueError( "max_cache_len is missing but it should have been passed. 
Something went wrong, please open an " "issue on GitHub with a reproducer: https://github.com/huggingface/peft/issues" ) base_config = base_model.config if hasattr(base_config, "get_text_config"): base_config = base_config.get_text_config() if transformers_lt_4_56: # HybridCache is deprecated, and will be removed in 4.60.0 # see https://github.com/huggingface/transformers/pull/40276 from transformers import HybridCache new_cache = HybridCache( config=base_config, max_batch_size=batch_size, max_cache_len=max_cache_len, dtype=past_key_values[0].dtype, device=past_key_values[0].device, ) else: # transformers 4.56+ uses DynamicCache for gemma new_cache = DynamicCache(config=base_config) cache_position = torch.arange(peft_config.num_virtual_tokens, device=past_key_values[0].device) for layer_idx in range(peft_config.num_layers): key_states, value_states = past_key_values[0][layer_idx], past_key_values[1][layer_idx] new_cache.update( key_states, value_states, layer_idx, cache_kwargs={"cache_position": cache_position} ) past_key_values = new_cache elif peft_config.num_transformer_submodules == 1: # Dont' apply this to encoder-decoder models and not to models requiring special processing. # local import in case users use a very old transformers version past_key_values = DynamicCache.from_legacy_cache(past_key_values) elif (peft_config.num_transformer_submodules == 2) and getattr( self.base_model, "_supports_cache_class", True ): # Dont' apply this to encoder-decoder models that don't support new Cachc format yet # If we don't apply this, prefix-tuning fails to update cross-attn cache # TODO: remove check for _supports_cache_class once transformers 4.53 is no longer supported past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values.cross_attention_cache = DynamicCache() past_key_values.is_updated = { layer_idx: False for layer_idx in range(len(past_key_values.cross_attention_cache.key_cache)) } map_cache_to_layer_device_map(self.get_base_model(), past_key_values) # no-op if not a Cache instance return past_key_values else: if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompts = prompt_encoder(prompt_tokens, task_ids) else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight else: # Take only one prompt token sample and expand the output instead of expanding the input, see: # https://github.com/huggingface/peft/issues/2043#issuecomment-2321522577 prompt_tokens = prompt_tokens[:1] prompts = prompt_encoder(prompt_tokens) prompts = prompts.repeat(batch_size, 1, 1) return prompts def get_nb_trainable_parameters(self) -> tuple[int, int]: r""" Returns the number of trainable parameters and the number of all parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": if hasattr(param, "element_size"): num_bytes = param.element_size() elif not hasattr(param, "quant_storage"): num_bytes = 1 else: num_bytes = param.quant_storage.itemsize num_params = num_params * 2 * num_bytes all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self) -> None: """ Prints the number of trainable parameters in the model. Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number of trainable parameters of the backbone transformer model which can be different. """ trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param:.4f}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "base_model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.base_model, name) @contextmanager def _enable_peft_forward_hooks(self, *args, **kwargs): # If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context. Otherwise, this # runs without any changes if hasattr(self.base_model, "_enable_peft_forward_hooks"): with self.base_model._enable_peft_forward_hooks(*args, **kwargs): yield return else: # nothing to enable yield return def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ with self._enable_peft_forward_hooks(*args, **kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} return self.get_base_model()(*args, **kwargs) def generate(self, *args, **kwargs): with self._enable_peft_forward_hooks(*args, **kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} return self.get_base_model().generate(*args, **kwargs) def _get_base_model_class(self, is_prompt_tuning=False): """ Returns the base model class. """ if not is_prompt_tuning: return self.base_model.model.__class__ return self.base_model.__class__ @contextmanager def disable_adapter(self): """ Context manager that disables the adapter module. Use this to run inference on the base model. Example: ```py >>> with model.disable_adapter(): ... model(inputs) ``` """ if self.peft_config[self.active_adapter].is_prompt_learning: try: # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and # letting the underlying methods deal with it, same as how LoRA does it. 
old_forward = self.forward self.forward = self.base_model.forward old_prepare_inputs_for_generation = self.prepare_inputs_for_generation self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation yield finally: self.forward = old_forward self.prepare_inputs_for_generation = old_prepare_inputs_for_generation elif self.peft_config[self.active_adapter].is_adaption_prompt: try: self.base_model.disable_adapter_layers() yield finally: self.base_model.enable_adapter_layers() else: # LoRA, LoHa, etc. model_status = self.get_model_status() if model_status.enabled == "irregular": warnings.warn( "The model contains some adapter layers that are enabled and others that are disabled. " "This is most likely unintentional. After exiting the disable_adapter context, all adapters " "will be enabled" ) try: self.base_model.disable_adapter_layers() yield finally: if model_status.enabled is not False: # model_status.enabled is `True` or `"irregular"` self.base_model.enable_adapter_layers() def get_base_model(self) -> torch.nn.Module: """ Returns the base model. """ return self.base_model if self.active_peft_config.is_prompt_learning else self.base_model.model def add_adapter(self, adapter_name: str, peft_config: PeftConfig, low_cpu_mem_usage: bool = False) -> None: """ Add an adapter to the model based on the passed configuration. This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the process when loading saved adapters. Don't use this option when creating a new PEFT adapter for training. """ prefix = PEFT_TYPE_TO_PREFIX_MAPPING.get(peft_config.peft_type) if prefix and adapter_name in prefix: warnings.warn( f"Adapter name {adapter_name} should not be contained in the prefix {prefix}." "This may lead to reinitialization of the adapter weights during loading." ) if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." ) try: if peft_config.is_prompt_learning: self.peft_config[adapter_name] = peft_config if hasattr(self.config, "to_dict"): dict_config = self.config.to_dict() else: dict_config = self.config peft_config = _prepare_prompt_learning_config(peft_config, dict_config) self._setup_prompt_encoder(adapter_name) set_additional_trainable_modules( model=self.base_model, peft_config=peft_config, model_config=BaseTuner.get_model_config(self), adapter_name=adapter_name, ) elif peft_config.is_adaption_prompt: self.base_model.add_adapter(adapter_name, peft_config) set_additional_trainable_modules( model=self.base_model, peft_config=peft_config, model_config=BaseTuner.get_model_config(self), adapter_name=adapter_name, ) else: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter( self.base_model.model, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage ) except Exception: # something went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. 
Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} does not exist") self.base_model.delete_adapter(adapter_name=adapter_name) new_active_adapters = self.active_adapters num_adapters = len(new_active_adapters) # Note: PeftModel assumes that there is exactly one active adapter, so we should theoretically raise if # num_adapters != 1. However, we have allowed this in the past (maybe inadvertently), so we let it slip and # don't introduce a backwards incompatibility by raising an error. if num_adapters == 1: self.active_adapter = new_active_adapters[0] @property def modules_to_save(self) -> Optional[set[str]]: modules: set[str] = set() for config in self.peft_config.values(): if getattr(config, "modules_to_save", None) is not None: # modules_to_save can only be a sequence of str, not a str modules.update(config.modules_to_save) if not modules: # for backwards compatibility, as modules_to_save was initialized as None return None return modules def get_layer_status(self) -> list[TunerLayerStatus]: """Get the status of each adapter layer in the model. This method returns a list of `TunerLayerStatus` dataclass instances, each of which contains the following attributes: - `name` (`str`): The name of the adapter layer, e.g. `model.encoder.block.0.layer.0.SelfAttention.q`. - `module_type` (`str`): The type of the adapter layer, e.g. `lora.Linear`. - `enabled` (`bool`): Whether the adapter layer is enabled. - `active_adapters` (`list[str]`): The names of the active adapters, if any, e.g. `["default"]`. - `merged_adapters` (`list[str]`): The names of the merged adapters, if any, e.g. `["default"]`. - `available_adapters` (`list[str]`): The names of the available adapters, e.g. `["default"]`. Args: model ([`~PeftModel`]): The model to get the adapter layer status from. Returns: list[`peft.peft_model.TunerLayerStatus`]: A list of dataclasses, each containing the status of the corresponding adapter layer. """ return get_layer_status(self) def get_model_status(self) -> TunerModelStatus: """Get the status of tuners of the model. This method returns a `TunerModelStatus` dataclass instance, which contains the following attributes: - `base_model_type` (`str`): The type of the base model, e.g. `T5Model`. - `adapter_model_type` (`str`): The type of the adapter model, e.g. `LoraModel`. - `peft_types` (`dict[str, str]`): The mapping of adapter name to adapter type, e.g. `{"default": "LORA"}`. - `trainable_params` (`int`): The number of trainable parameters in the model. - `total_params` (`int`): The total number of parameters in the model. - `num_adapter_layers` (`int`): The number of adapter layers in the model. - `enabled` (`bool`, `Literal["irregular"]`): Whether all adapter layers are enabled. If some are enabled and some are not, this will be `"irregular"`. This means that your model is in an inconsistent state and might not work as expected. - `active_adapters` (`list[str]`, `Literal["irregular"]`): The names of the active adapters. If the active adapters are not consistent across all layers, this will be `"irregular"`, which means that your model is in an inconsistent state and might not work as expected. - `merged_adapters` (`list[str]`, `Literal["irregular"]`): The names of the merged adapters. If the merged adapters are not consistent across all layers, this will be `"irregular"`, which means that your model is in an inconsistent state and might not work as expected. 
- `available_adapters` (`list[str]`): The names of the available adapters, e.g. `["default"]`. Args: model ([`~PeftModel`]): The model to get the adapter layer status from. Returns: `peft.peft_model.TunerModelStatus`: A dataclass containing the status of the model. """ return get_model_status(self) @classmethod def _split_kwargs(cls, kwargs: dict[str, Any]): _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) hf_hub_download_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: hf_hub_download_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, other_kwargs def _update_offload(self, offload_index: dict[str, dict[str, str]], adapters_weights: dict[str, torch.tensor]): """ Update the offload_index and safetensors files for loading and mergine PeftModels with disk-offloaded modules. Args: offload_index (Dict[str: str]): Dictionary of disk-offloaded modules with their metadata and safetensors filenames adapters_weights (Dict[str: torch.tensor]): Dictionary of Peft adapter module names and weights """ if not offload_index: return offload_index prefix = "base_model.model." # rename offload index weight and model names adapter_names = list(self.peft_config.keys()) for adapter_name in adapter_names: keys = list(offload_index.keys()) block_id = keys[0].split(".")[0] + "." # for writing safetensors key, # replace original offload index keys with PeftModel keys for key in keys: suffix_pos = key.rfind(".") extended_prefix = prefix + key[:suffix_pos] module = dict(self.named_modules())[extended_prefix] if isinstance(module, BaseTunerLayer): new_key = prefix + key[:suffix_pos] + ".base_layer" + key[suffix_pos:] else: new_key = prefix + key offload_index[key]["weight_name"] = new_key offload_index[new_key] = offload_index[key] del offload_index[key] files_seen = set() # rename safetensors for dispatch for new_key in list(offload_index.keys()): fname = offload_index[new_key]["safetensors_file"] # make a new file name new_fname_list = list(fname.split(os.sep)) for i, name in enumerate(new_fname_list): if "--" in name: new_fname_list[i] += "-peft" break new_fname = os.path.join(*new_fname_list) if fname in files_seen: continue safe_dict = {} with safe_open(fname, framework="pt") as f: for safe_key in f.keys(): safe_tensor = f.get_tensor(safe_key) metadata = f.metadata() suffix_pos = safe_key.rfind(".") extended_prefix = prefix + block_id + safe_key[:suffix_pos] safe_module = dict(self.named_modules())[extended_prefix] if isinstance(safe_module, BaseTunerLayer): final_key = extended_prefix + ".base_layer" + safe_key[suffix_pos:] lora_dict = {key: val for key, val in adapters_weights.items() if extended_prefix in key} # add LoRA keys and values to disk offload for lora_key, lora_val in lora_dict.items(): divide = lora_key.rfind(".") new_key = lora_key[:divide] + f".{adapter_name}" + lora_key[divide:] safe_dict[new_key] = lora_val else: final_key = prefix + block_id + safe_key safe_dict[final_key] = safe_tensor files_seen.add(new_fname) # avoid overwriting original safetensors for key in safe_dict.keys(): offload_index[key] = {"safetensors_file": new_fname, "weight_name": key} base_name = os.path.dirname(new_fname) if not os.path.exists(base_name): os.makedirs(base_name) safe_save_file(safe_dict, new_fname, metadata=metadata) def _check_new_adapter_config(self, peft_config: PeftConfig, is_trainable: bool) -> None: """Perform checks on newly added PEFT 
configs to ensure integrity.""" if peft_config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") # Since PiSSA/CorDA/OLoRA modifies the base weights, it should not be combined with other adapters. all_configs = [peft_config] + list(self.peft_config.values()) if len(all_configs) > 1: if any(getattr(config, "init_lora_weights", None) == "pissa" for config in all_configs): msg = ( "PiSSA changes the base weights of the model and should thus not be used with other adapters. " "Consider converting the PiSSA adapter into a normal LoRA adapter: " "https://github.com/huggingface/peft/tree/main/examples/pissa_finetuning#convert-pissa-to-lora" ) warnings.warn(msg) elif any(getattr(config, "init_lora_weights", None) == "corda" for config in all_configs): msg = ( "CorDA changes the base weights of the model and should thus not be used with other adapters. " "Consider converting the CorDA adapter into a normal LoRA adapter: " "https://github.com/huggingface/peft/tree/main/examples/corda_finetuning#convert-corda-to-lora" ) warnings.warn(msg) elif any(getattr(config, "init_lora_weights", None) == "olora" for config in all_configs): msg = ( "OLoRA changes the base weights of the model and should thus not be used with other adapters. " "Consider converting the OLoRA adapter into a normal LoRA adapter: " "https://github.com/huggingface/peft/tree/main/examples/olora_finetuning#olora-and-lora" ) warnings.warn(msg) def load_adapter( self, model_id: Union[str, os.PathLike], adapter_name: str, is_trainable: bool = False, torch_device: Optional[str] = None, autocast_adapter_dtype: bool = True, ephemeral_gpu_offload: bool = False, low_cpu_mem_usage: bool = False, key_mapping: Optional[dict[str, str]] = None, **kwargs: Any, ): """ Load a trained adapter into the model. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`): The name of the adapter to be added. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. torch_device (`str`, *optional*, defaults to None): The device to load the adapter on. If `None`, the device will be inferred. autocast_adapter_dtype (`bool`, *optional*, defaults to `True`): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. ephemeral_gpu_offload (`bool`, *optional*, defaults to `False`): Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device before loading the saved weights. Useful to speed up the process. key_mapping (dict, *optional*, defaults to None) Extra mapping of PEFT `state_dict` keys applied before loading the `state_dict`. 
When this mapping is applied, the PEFT-specific `"base_model.model"` prefix is removed beforehand and the adapter name (e.g. `"default"`) is not inserted yet. Only pass this argument if you know what you're doing. kwargs: (`optional`): Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. """ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) if torch_device is None: torch_device = infer_device() if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, **hf_hub_download_kwargs, ) ].from_pretrained( model_id, ephemeral_gpu_offload=ephemeral_gpu_offload, **hf_hub_download_kwargs, ) self._check_new_adapter_config(peft_config, is_trainable=is_trainable) peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config, low_cpu_mem_usage=low_cpu_mem_usage) adapters_weights = load_peft_weights( model_id, device=torch_device, key_mapping=key_mapping, **hf_hub_download_kwargs ) # load the weights into the model ignore_mismatched_sizes = kwargs.get("ignore_mismatched_sizes", False) load_result = set_peft_model_state_dict( self, adapters_weights, adapter_name=adapter_name, ignore_mismatched_sizes=ignore_mismatched_sizes, low_cpu_mem_usage=low_cpu_mem_usage, ) tuner = self.peft_config[adapter_name].peft_type tuner_prefix = PEFT_TYPE_TO_PREFIX_MAPPING.get(tuner, "") adapter_missing_keys = [] # Filter missing keys specific to the current adapter and tuner prefix. for key in load_result.missing_keys: if tuner_prefix in key and adapter_name in key: adapter_missing_keys.append(key) load_result.missing_keys.clear() load_result.missing_keys.extend(adapter_missing_keys) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_folder = kwargs.get("offload_folder", None) offload_dir = kwargs.get("offload_dir", None) offload_index = kwargs.get("offload_index", None) if offload_dir is not None and offload_folder is not None: # see https://github.com/huggingface/peft/issues/2541 raise ValueError("Cannot use `offload_folder` when `offload_dir` is specified.") elif offload_dir is None: # to keep backwards compatibility offload_dir = offload_folder dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) self._update_offload(offload_index, adapters_weights) dispatch_model_kwargs["offload_index"] = offload_index dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if self.peft_config[adapter_name].is_prompt_learning: remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) if 
hasattr(self.base_model, "_cast_adapter_dtype"): self.base_model._cast_adapter_dtype( adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype ) # Set model in evaluation mode to deactivate Dropout modules by default if not is_trainable: self.eval() return load_result def set_adapter(self, adapter_name: str) -> None: """ Sets the active adapter. Only one adapter can be active at a time. Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str`): The name of the adapter to be set as active. The adapter must be loaded first. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not self.peft_config[adapter_name].is_prompt_learning: self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) @property def base_model_torch_dtype(self): return getattr(self.base_model, "dtype", None) @property def active_peft_config(self): return self.peft_config[self.active_adapter] def _get_peft_specific_model_tags(self): """Derive tags for the model card from the adapter's config. For example, setting the base model is important for enabling support for HF inference providers but it also makes models more searchable on the HF hub. """ peft_method = self.active_peft_config.peft_type if not isinstance(peft_method, str): peft_method = peft_method.value tags = [] if hasattr(self.base_model, "model") and isinstance(self.base_model.model, transformers.PreTrainedModel): tags.append("transformers") if peft_method == "LORA": tags.append("lora") if hasattr(self.base_model, "name_or_path"): tags.append(f"base_model:adapter:{self.base_model.name_or_path}") return tags def create_or_update_model_card(self, output_dir: str): """ Updates or create model card to include information about peft: 1. Adds `peft` library tag 2. Adds peft version 3. Adds base model info 4. Adds quantization information if it was used """ filename = os.path.join(output_dir, "README.md") card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) card.data["library_name"] = "peft" tags = set() base_model = self.get_base_model() if hasattr(base_model, "model_tags"): tags = tags.union(base_model.model_tags or []) tags = tags.union(self._get_peft_specific_model_tags()) if tags: card.data["tags"] = sorted(tags) # One of the rare moments where we can select the pipeline tag with certainty, so let's do that. # Makes it easier to deploy an adapter with auto inference since the user doesn't have to add any tags. 
if not card.data.pipeline_tag and isinstance(self, PeftModelForCausalLM): card.data.pipeline_tag = "text-generation" model_config = BaseTuner.get_model_config(self) model_config = None if model_config == DUMMY_MODEL_CONFIG else model_config if model_config is not None and "_name_or_path" in model_config: card.data["base_model"] = model_config["_name_or_path"] lines = card.text.splitlines() quantization_config = None if hasattr(model_config, "quantization_config"): quantization_config = self.config.quantization_config.to_dict() training_config_text = "" quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" # Adds quantization information if it was used if quantization_config is not None: training_config_text += f"\n{quantization_prefix}\n" training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) training_config_text += "\n" training_procedure_heading = "## Training procedure" if quantization_prefix not in lines and bool(training_config_text): if training_procedure_heading in lines: lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) else: lines.append(f"{training_procedure_heading}\n{training_config_text}") # Adds peft version framework_block_heading = "### Framework versions" if f"- PEFT {__version__}" not in lines: if framework_block_heading in lines: lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") else: lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") card.text = "\n".join(lines) card.save(filename) class PeftModelForSequenceClassification(PeftModel): """ Peft model for sequence classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForSequenceClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "SEQ_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForSequenceClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__( self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs ) -> None: classifier_module_names = ["classifier", "score"] if hasattr(peft_config, "modules_to_save"): if peft_config.modules_to_save is None: peft_config.modules_to_save = classifier_module_names[:] else: peft_config.modules_to_save.extend(classifier_module_names) # The modification of peft_config must happen before the init call as the `modules_to_save` information # will be used to guard the target layer matching against matching `modules_to_save` layers. Only the # config is relevant for this, the `modules_to_save` attribute can follow later. super().__init__(model, peft_config, adapter_name, **kwargs) if hasattr(peft_config, "modules_to_save"): for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable; this may add a new ModulesToSaveWrapper _set_trainable(self, adapter_name, module_names=getattr(peft_config, "modules_to_save", None)) def add_adapter(self, adapter_name: str, peft_config: PeftConfig, low_cpu_mem_usage: bool = False) -> None: """ Add an adapter to the model based on the passed configuration. This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the process when loading saved adapters. Don't use this option when creating a new PEFT adapter for training. 
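        Example (a minimal sketch; the base model and LoRA settings are illustrative, not prescribed by this
        method):

        ```py
        >>> from transformers import AutoModelForSequenceClassification
        >>> from peft import LoraConfig, get_peft_model

        >>> base_model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
        >>> peft_model = get_peft_model(base_model, LoraConfig(task_type="SEQ_CLS"))
        >>> # add a second, independent adapter; it is not activated automatically
        >>> peft_model.add_adapter("second", LoraConfig(task_type="SEQ_CLS", r=16))
        >>> peft_model.set_adapter("second")
        ```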
""" # ensure that additional adapters also add the classifier layer to modules_to_save if hasattr(peft_config, "modules_to_save"): classifier_module_names = ["classifier", "score"] if peft_config.modules_to_save is None: peft_config.modules_to_save = classifier_module_names[:] else: peft_config.modules_to_save.extend(classifier_module_names) return super().add_adapter(adapter_name, peft_config, low_cpu_mem_usage=low_cpu_mem_usage) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict peft_config = self.active_peft_config if not peft_config.is_prompt_learning: with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values 
which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: pooled_output = self.base_model.dropout(pooled_output) logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.base_model.num_labels == 1: self.config.problem_type = "regression" elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.base_model.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForCausalLM(PeftModel): """ Peft model for causal language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModelForCausalLM, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "CAUSAL_LM", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 1280, ... "num_transformer_submodules": 1, ... "num_attention_heads": 20, ... "num_layers": 36, ... "encoder_hidden_size": 1280, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") >>> peft_model = PeftModelForCausalLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 ``` """ def __init__( self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs ) -> None: super().__init__(model, peft_config, adapter_name, **kwargs) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if self.base_model.config.model_type == "mpt": if inputs_embeds is not None: raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds") return self.base_model( input_ids=input_ids, attention_mask=attention_mask, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: # overwrite past_kv in kwargs # some archs require max_cache_len to re-initialize the cache if input_ids is not None: max_cache_len = input_ids.shape[1] + peft_config.num_virtual_tokens else: max_cache_len = inputs_embeds.shape[1] + peft_config.num_virtual_tokens kwargs["past_key_values"] = self.get_prompt(batch_size, max_cache_len=max_cache_len) return self.base_model(input_ids=input_ids, inputs_embeds=inputs_embeds, **kwargs) elif peft_config.peft_type == PeftType.CPT: return self._cpt_forward(input_ids, inputs_embeds, peft_config, task_ids, batch_size, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # concat prompt labels if labels is not None: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _cpt_forward(self, input_ids, inputs_embeds, peft_config, task_ids, batch_size, **kwargs): # Extract labels from kwargs labels = kwargs.pop("labels") device = [i.device for i in [input_ids, inputs_embeds, labels] if i is not None][0] # Extract input_type_mask from kwargs and move it to the same device as labels if "input_type_mask" in kwargs.keys(): input_type_mask = kwargs.pop("input_type_mask").to(device) else: if input_ids is None: N_tokens = inputs_embeds.shape[1] else: N_tokens = input_ids.shape[1] input_type_mask = torch.ones((batch_size, N_tokens)).to(device) * 4 cpt_token_ids = peft_config.cpt_token_ids cpt_tokens_type_mask = peft_config.cpt_tokens_type_mask # Generate embeddings if not provided if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # Get prompt and concatenate with input embeddings prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) # If labels are provided, generate prefix labels and type mask cpt_labels = None if labels is not None: # Generate prefix labels and concatenate with the input labels prefix_labels = torch.Tensor(cpt_token_ids).long().view(1, -1) prefix_labels = prefix_labels.repeat(batch_size, 1).to(labels.device) cpt_labels = torch.cat((prefix_labels, labels), dim=1) # Generate prefix type mask and shift input type mask values to avoid conflicts prefix_type_mask = torch.Tensor(cpt_tokens_type_mask).long().view(1, -1) prefix_type_mask = prefix_type_mask.repeat(batch_size, 1).to(labels.device) adjusted_input_type_mask = input_type_mask adjusted_input_type_mask[adjusted_input_type_mask > 0] += prefix_type_mask.max() # Concatenate prefix and shifted input type masks cpt_type_mask = torch.cat((prefix_type_mask, adjusted_input_type_mask), dim=1) # Identify valid label positions and mask invalid ones with -100 labels_idx = (cpt_type_mask > 0) & (cpt_type_mask % 4 == 0) cpt_labels[~labels_idx] = -100 # Update kwargs with the modified labels kwargs["labels"] = cpt_labels # Pass the modified inputs to the base model base_model_output = 
self.base_model(inputs_embeds=inputs_embeds, **kwargs) if labels is None: return base_model_output else: # Calculate the loss using the custom CPT loss function cpt_embedding = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] base_model_output = cpt_embedding.calculate_loss( base_model_output, cpt_labels, cpt_type_mask, self.peft_config["default"] ) return base_model_output def generate(self, *args, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation if hasattr(self.base_model, "model"): self.base_model.model.generation_config = self.generation_config else: self.base_model.generation_config = self.generation_config try: if not peft_config.is_prompt_learning: with self._enable_peft_forward_hooks(*args, **kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} outputs = self.base_model.generate(*args, **kwargs) else: outputs = self.base_model.generate(**kwargs) except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation return outputs def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) # https://github.com/huggingface/transformers/pull/26681/ introduced new cache format # for some architectures which requires a special fix for prompt tuning etc. # TODO: starting with transformers 4.38, all architectures should support caching. uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0") uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0") transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"] if packaging.version.parse(transformers.__version__) > packaging.version.parse("4.43.3"): # https://github.com/huggingface/transformers/pull/31445 transformers_new_cache_archs.append("bloom") uses_cache = uses_transformers_4_38 or ( uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs ) # heuristic to determine if we're in 'prefill stage' (when the KV cache is filled with the values from the # initial input) is_prefill = (model_kwargs.get("cache_position") is not None) and (model_kwargs["cache_position"][0] == 0) if peft_config.peft_type == PeftType.POLY: model_kwargs["task_ids"] = task_ids if peft_config.is_prompt_learning: if uses_cache and (model_kwargs.get("past_key_values", None) is not None): # change in the logic of `prepare_inputs_for_generation` makes the below code necessary # In prompt learning methods, past key values are longer when compared to the `input_ids`. # As such only consider the last input ids in the autogressive generation phase. 
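                    # Illustrative example (numbers are made up): with `num_virtual_tokens=20` and a 5-token prompt,
                    # the cache holds 25 positions after prefill while `input_ids` grows by one token per step, so
                    # the check below keeps only the newest token id for the next forward pass.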
past_key_values = model_kwargs["past_key_values"] if isinstance(past_key_values, (tuple, list)): seq_len = past_key_values[0][0].shape[-2] else: # using transformers kv cache seq_len = past_key_values.get_seq_length() if seq_len >= model_kwargs["input_ids"].shape[1]: model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:] if (attention_mask := model_kwargs.get("attention_mask", None)) is not None: if isinstance(attention_mask, dict): # see: https://github.com/huggingface/transformers/pull/37866 # For now, just deal with the case of a single attention mask if len(attention_mask) != 1: raise ValueError( f"Expected a single attention mask, got {len(attention_mask)} instead, please open an " "issue (https://github.com/huggingface/peft/issues) and report the error." ) attention_mask = list(attention_mask.values())[0] size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device) if attention_mask.dim() == 4: # Transform the 4d attention mask to 2d, leave it up to the model to deal with it instead of trying # to create a 4d attention mask here. # from [batch_size, heads, input_ids_length, total_sequence_length] # to [batch_size, total_sequence_length] bs = attention_mask.shape[0] total_seq_len = prefix_attention_mask.shape[1] + attention_mask.shape[2] attention_mask_2d = torch.ones((bs, total_seq_len), dtype=attention_mask.dtype) if is_prefill and (peft_config.peft_type != PeftType.PREFIX_TUNING): # if in prefill stage, for prompt learning methods that are not prefix tuning, new tokens # (embeddings) are inserted, thus set cache_position to correspond to these tokens cache_position_ = torch.arange(total_seq_len, device=model_kwargs["input_ids"].device) else: # prefix tuning acts directly on the cache, no need to upate cache_position cache_position_ = model_kwargs["cache_position"] attention_mask_new = create_attention_mask( self.get_base_model(), model_input=None, attention_mask=attention_mask_2d, past_key_values=model_kwargs.get("past_key_values"), cache_position=cache_position_, batch_size=bs, sequence_length=total_seq_len, position_ids=model_kwargs.get("position_ids", None), ) model_kwargs["attention_mask"] = attention_mask_new else: # 2d attention mask model_kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) if model_kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") model_kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids" ) kwargs["token_type_ids"] = None cache: transformers.Cache | None = model_kwargs.get("past_key_values", None) # no past_key_values or past_key_values empty cache requires_prompt_injection = (cache is None) or ( isinstance(cache, transformers.Cache) and not cache.get_seq_length() ) if requires_prompt_injection and peft_config.peft_type == PeftType.PREFIX_TUNING: # some archs require max_cache_len to re-initialize the cache, but DynamicCache has no max len if isinstance(cache, transformers.Cache) and not isinstance(cache, transformers.DynamicCache): max_cache_len = cache.max_cache_len else: max_cache_len = -1 # -1 means no max length new_past_key_values = self.get_prompt( batch_size=model_kwargs["input_ids"].shape[0], max_cache_len=max_cache_len, ) model_kwargs["past_key_values"] = new_past_key_values elif requires_prompt_injection: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None # if we're in the prefill stage if is_prefill and (peft_config.peft_type == PeftType.PREFIX_TUNING): # for prefix tuning, the past_key_values have been prefilled model_kwargs["cache_position"] += peft_config.num_virtual_tokens elif peft_config.peft_type != PeftType.PREFIX_TUNING: # prefix tuning needs cache_position # For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is passed in the forward # pass to keep track of the position ids of the cache. We have to pop that from `model_kwargs` as # `cache_position` is properly created by the model, using the passed `inputs_embeds`: # https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956 _ = model_kwargs.pop("cache_position", None) return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... "target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__( self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs ) -> None: super().__init__(model, peft_config, adapter_name, **kwargs) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( decoder_attention_mask.device ) if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: # overwrite past_kv in kwargs kwargs["past_key_values"] = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs, ) elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) return self.base_model( inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs, ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) decoder_inputs_embeds = self.word_embeddings(decoder_input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) # concat prompt labels if labels is not None: if peft_config.num_transformer_submodules == 1: kwargs["labels"] = labels elif peft_config.num_transformer_submodules == 2: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) if peft_config.num_transformer_submodules == 1: return self.base_model(inputs_embeds=inputs_embeds, **kwargs) elif peft_config.num_transformer_submodules == 2: decoder_inputs_embeds = torch.cat( (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1 ) return self.base_model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs ) def generate(self, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self._prepare_encoder_decoder_kwargs_for_generation ) try: if not peft_config.is_prompt_learning: with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} outputs = self.base_model.generate(**kwargs) else: if "input_ids" not in kwargs: raise ValueError("input_ids must be provided for Peft model generation") if kwargs.get("position_ids", None) is not None: warnings.warn( "Position ids are not supported for parameter efficient tuning. Ignoring position ids." 
) kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if peft_config.peft_type == PeftType.PREFIX_TUNING: outputs = self.base_model.generate(**kwargs) elif peft_config.peft_type in [ PeftType.PROMPT_TUNING, PeftType.P_TUNING, PeftType.MULTITASK_PROMPT_TUNING, ]: kwargs = deepcopy(kwargs) if "encoder_outputs" in kwargs: del kwargs["encoder_outputs"] warnings.warn( "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it." ) input_ids = kwargs.pop("input_ids") inputs_embeds = self.word_embeddings(input_ids) batch_size = inputs_embeds.shape[0] prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None)) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) kwargs["inputs_embeds"] = inputs_embeds if "attention_mask" in kwargs: prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( kwargs["attention_mask"].device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) return self.base_model.generate(**kwargs) else: raise NotImplementedError except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) return outputs def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if peft_config.peft_type == PeftType.POLY: model_kwargs["task_ids"] = task_ids elif peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = model_kwargs.get("past_key_values", None) cache_position = model_kwargs.get("cache_position", [None]) # check prefill stage is_prefill_stage = ( # old cache implementation (past_key_values is None) # new cache implementation or (isinstance(past_key_values, Cache) and (cache_position[0] == 0)) ) if is_prefill_stage: batch_size = model_kwargs["decoder_input_ids"].shape[0] new_past_key_values = self.get_prompt(batch_size) model_kwargs["past_key_values"] = new_past_key_values return model_kwargs class PeftModelForTokenClassification(PeftModel): """ Peft model for token classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. 
Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForTokenClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "TOKEN_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForTokenClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__( self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default", **kwargs ) -> None: super().__init__(model, peft_config, adapter_name, **kwargs) classifier_module_names = ["classifier", "score"] if hasattr(peft_config, "modules_to_save"): if peft_config.modules_to_save is None: peft_config.modules_to_save = classifier_module_names[:] else: peft_config.modules_to_save.extend(classifier_module_names) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable; this may add a new ModulesToSaveWrapper _set_trainable(self, adapter_name, module_names=getattr(peft_config, "modules_to_save", None)) def add_adapter(self, adapter_name: str, peft_config: PeftConfig, low_cpu_mem_usage: bool = False) -> None: """ Add an adapter to the model based on the passed configuration. This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the process when loading saved adapters. Don't use this option when creating a new PEFT adapter for training. 
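        Example (a minimal sketch; the adapter name and LoRA settings are illustrative):

        ```py
        >>> from peft import LoraConfig

        >>> # `peft_model` is assumed to be an existing `PeftModelForTokenClassification`
        >>> peft_model.add_adapter("second", LoraConfig(task_type="TOKEN_CLS", r=8))
        >>> # the token classification head ("classifier" or "score") is appended to `modules_to_save` automatically
        ```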
""" # ensure that additional adapters also add the classifier layer to modules_to_save if hasattr(peft_config, "modules_to_save"): classifier_module_names = ["classifier", "score"] if peft_config.modules_to_save is None: peft_config.modules_to_save = classifier_module_names[:] else: peft_config.modules_to_save.extend(classifier_module_names) return super().add_adapter(adapter_name, peft_config, low_cpu_mem_usage=low_cpu_mem_usage) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values 
which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForQuestionAnswering(PeftModel): """ Peft model for extractive question answering. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForQuestionAnswering >>> from peft import PeftModelForQuestionAnswering, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "QUESTION_ANS", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForQuestionAnswering(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013 ``` """ def __init__( self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs ) -> None: super().__init__(model, peft_config, adapter_name, **kwargs) qa_module_names = ["qa_outputs"] if hasattr(peft_config, "modules_to_save"): if peft_config.modules_to_save is None: peft_config.modules_to_save = qa_module_names[:] else: peft_config.modules_to_save.extend(qa_module_names) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable; this may add a new ModulesToSaveWrapper _set_trainable(self, adapter_name, module_names=getattr(peft_config, "modules_to_save", None)) def add_adapter(self, adapter_name: str, peft_config: PeftConfig, low_cpu_mem_usage: bool = False) -> None: """ Add an adapter to the model based on the passed configuration. This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. 
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the process when loading saved adapters. Don't use this option when creating a new PEFT adapter for training. """ # ensure that additional adapters also add the classifier layer to modules_to_save if hasattr(peft_config, "modules_to_save"): qa_module_names = ["qa_outputs"] if peft_config.modules_to_save is None: peft_config.modules_to_save = qa_module_names[:] else: peft_config.modules_to_save.extend(qa_module_names) return super().add_adapter(adapter_name, peft_config, low_cpu_mem_usage=low_cpu_mem_usage) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, start_positions=start_positions, end_positions=end_positions, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "start_positions": start_positions, "end_positions": end_positions, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class 
PeftModelForFeatureExtraction(PeftModel): """ Peft model for extracting features/embeddings from transformer models Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. Example: ```py >>> from transformers import AutoModel >>> from peft import PeftModelForFeatureExtraction, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "FEATURE_EXTRACTION", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModel.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForFeatureExtraction(model, peft_config) >>> peft_model.print_trainable_parameters() ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs): super().__init__(model, peft_config, adapter_name, **kwargs) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if peft_config.peft_type == PeftType.POLY: kwargs["task_ids"] = task_ids with self._enable_peft_forward_hooks(**kwargs): kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args} return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: # overwrite past_kv in kwargs kwargs["past_key_values"] = self.get_prompt(batch_size) return self.base_model(input_ids=input_ids, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) @dataclass class TunerLayerStatus: name: str module_type: str enabled: bool active_adapters: list[str] merged_adapters: list[str] requires_grad: dict[str, bool | Literal["irregular"]] available_adapters: list[str] devices: dict[str, list[str]] def get_layer_status(model: torch.nn.Module) -> list[TunerLayerStatus]: """Get the status of each adapter layer in the model. This function returns a list of `TunerLayerStatus` dataclass instances, each of which contains the following attributes: - `name` (`str`): The name of the adapter layer, e.g. `model.encoder.block.0.layer.0.SelfAttention.q`. - `module_type` (`str`): The type of the adapter layer, e.g. `lora.Linear`. - `enabled` (`bool`): Whether the adapter layer is enabled. - `active_adapters` (`list[str]`): The names of the active adapters, if any, e.g. `["default"]`. - `merged_adapters` (`list[str]`): The names of the merged adapters, if any, e.g. `["default"]`. - requires_grad : dict[str, bool | Literal["irregular"]] The requires_grad status of the parameters for each adapter module. Ideally, it should be either `True` or `False`. If the requires_grad status is not consistent across all parameters, the value will be set to `"irregular"`. - `available_adapters` (`list[str]`): The names of the available adapters, e.g. `["default"]`. - `devices` (`dict[str, list[str]]`): The devices where the parameters of the given adapter are stored, e.g. `["cuda"]`. Args: model ([Union[`~PeftModel`, `~transformers.PreTrainedModel`, `nn.Module`]]): The model to get the adapter layer status from. Returns: list[`peft.peft_model.TunerLayerStatus`]: A list of dataclasses, each containing the status of the corresponding adapter layer. """ if isinstance(model, PeftModel): base_model = model.base_model if not isinstance(base_model, BaseTuner): raise TypeError( "get_layer_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not " "supported." 
) else: base_model = model layer_status: list[TunerLayerStatus] = [] for name, module in base_model.named_modules(): if not isinstance(module, BaseTunerLayer): continue # determine if all submodules/parameters if this module require grad or not mapping_requires_grad_list: dict[str, list[bool]] = collections.defaultdict(list) for adapter_module_name in module.adapter_layer_names: adapter_module = getattr(module, adapter_module_name) if isinstance(adapter_module, torch.nn.ModuleDict): for key, submodule in adapter_module.items(): for param in submodule.parameters(): mapping_requires_grad_list[key].append(param.requires_grad) elif isinstance(adapter_module, torch.nn.ParameterDict): for key, param in adapter_module.items(): mapping_requires_grad_list[key].append(param.requires_grad) else: # strange, we don't know how to handle this, ignore for now pass def check_irrgular(vals: list[bool]) -> bool | Literal["irregular"]: if all(vals): return True if not any(vals): return False return "irregular" requires_grad = {key: check_irrgular(vals) for key, vals in mapping_requires_grad_list.items()} devices_dd = collections.defaultdict(list) for adapter_module_name in module.adapter_layer_names + module.other_param_names: adapter_module = getattr(module, adapter_module_name) if isinstance(adapter_module, torch.nn.ModuleDict): for key, submodule in adapter_module.items(): devices_dd[key].extend([param.device.type for param in submodule.parameters()]) elif isinstance(adapter_module, torch.nn.ParameterDict) or ( adapter_module.__class__.__name__ == "BufferDict" ): # VeRA for key, param in adapter_module.items(): devices_dd[key].append(param.device.type) devices = {key: sorted(set(val)) for key, val in devices_dd.items()} status = TunerLayerStatus( name=name, module_type=repr(module).partition("(")[0], enabled=not module.disable_adapters, active_adapters=module.active_adapters, merged_adapters=module.merged_adapters, requires_grad=requires_grad, available_adapters=sorted(module._get_available_adapters()), devices=devices, ) layer_status.append(status) if not layer_status: raise ValueError( "No adapter layers found in the model, please ensure that it's a PEFT model or that you have PEFT adapters " "injected in the model." ) return layer_status @dataclass class TunerModelStatus: base_model_type: str adapter_model_type: str peft_types: dict[str, str] trainable_params: int total_params: int num_adapter_layers: int enabled: bool | Literal["irregular"] active_adapters: list[str] | Literal["irregular"] merged_adapters: list[str] | Literal["irregular"] requires_grad: dict[str, bool | Literal["irregular"]] available_adapters: list[str] devices: dict[str, list[str]] def get_model_status(model: torch.nn.Module) -> TunerModelStatus: """Get the status of tuners of the model. This function returns a `TunerModelStatus` dataclass instance, which contains the following attributes: - `base_model_type` (`str`): The type of the base model, e.g. `T5Model`. - `adapter_model_type` (`str`): The type of the adapter model, e.g. `LoraModel`. - `peft_types` (`dict[str, str]`): The mapping of adapter name to adapter type, e.g. `{"default": "LORA"}`. - `trainable_params` (`int`): The number of trainable parameters in the model. - `total_params` (`int`): The total number of parameters in the model. - `num_adapter_layers` (`int`): The number of adapter layers in the model. - `enabled` (`bool`, `Literal["irregular"]`): Whether all adapter layers are enabled. If some are enabled and some are not, this will be `"irregular"`. 
This means that your model is in an inconsistent state and might not work as expected. - `active_adapters` (`list[str]`, `Literal["irregular"]`): The names of the active adapters. If the active adapters are not consistent across all layers, this will be `"irregular"`, which means that your model is in an inconsistent state and might not work as expected. - `merged_adapters` (`list[str]`, `Literal["irregular"]`): The names of the merged adapters. If the merged adapters are not consistent across all layers, this will be `"irregular"`, which means that your model is in an inconsistent state and might not work as expected. - `requires_grad` (`dict[str, bool | Literal["irregular"]]`): Whether for the given adapter, all adapter layers have `requires_grad` set to `True` or `False`. If there is a mix, this will be set to `"irregular"`, which means that your model is in an inconsistent state and might not work as expected. - `available_adapters` (`list[str]`): The names of the available adapters, e.g. `["default"]`. - `devices` (`dict[str, list[str]]`): The devices where the parameters of the given adapter are stored, e.g. `["cuda"]`. Args: model ([Union[`~PeftModel`, `~transformers.PreTrainedModel`, `nn.Module`]]): The model to get the adapter layer status from. Returns: `peft.peft_model.TunerModelStatus`: A dataclass containing the status of the model. """ if isinstance(model, PeftModel): if not isinstance(model.base_model, BaseTuner): raise TypeError( "get_model_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not " "supported." ) base_model_type = model.get_base_model().__class__.__name__ trainable_params, total_params = model.get_nb_trainable_parameters() base_model = model.base_model peft_types = {key: str(config.peft_type).partition(".")[-1] for key, config in base_model.peft_config.items()} adapter_model_type = base_model.__class__.__name__ elif isinstance(model, PreTrainedModel): base_model_type = model.__class__.__name__ trainable_params, total_params = PeftModel.get_nb_trainable_parameters(model) base_model = model peft_types = {} adapter_model_type = "None" else: base_model_type = "other" trainable_params, total_params = PeftModel.get_nb_trainable_parameters(model) base_model = model peft_types = {} adapter_model_type = "None" layer_status = get_layer_status(model) num_adapter_layers = len(layer_status) enabled_set: set[bool] = {status.enabled for status in layer_status} # must be {True}, {False}, or {True, False} enabled: bool | Literal["irregular"] if len(enabled_set) == 1: enabled = enabled_set.pop() else: enabled = "irregular" available_adapters: list[str] = sorted(set().union(*(status.available_adapters for status in layer_status))) # ideally, active adapters should be consistent across all layers of the model, but we cannot guarantee it all_active_adapters: set[tuple[str, ...]] = {tuple(status.active_adapters) for status in layer_status} active_adapters: list[str] | Literal["irregular"] if not all_active_adapters: active_adapters = [] elif len(all_active_adapters) == 1: active_adapters = list(all_active_adapters.pop()) else: active_adapters = "irregular" # Here we determine what adapters are merged. This is not trivial because multiple adapters can be merged or not at # the same time. Some layers may only have adapter A, some only adapter B, so it's not as easy as just checking # which adapters are merged on each layer. # First, determine all adapters that are merged on at least one module.
merged_all: set[str] = set() for status in layer_status: merged_all.update(status.merged_adapters) # Next, check if on any layer, one of these adapters is not merged. merged_adapters: list[str] | Literal["irregular"] = sorted(merged_all) for status in layer_status: unmerged = set(status.available_adapters) - set(status.merged_adapters) if unmerged & merged_all: # there is overlap between unmerged adapters and adapters that should be merged merged_adapters = "irregular" break # check status of requires_grad # first, merge the values for all layers requires_grad_all: dict[str, list[bool | Literal["irregular"]]] = collections.defaultdict(list) for status in layer_status: for key, val in status.requires_grad.items(): requires_grad_all[key].append(val) # then, check if the values are consistent def check_irrgular(vals: list[bool | Literal["irregular"]]) -> bool | Literal["irregular"]: if all(val is True for val in vals): return True if all(val is False for val in vals): return False return "irregular" requires_grad = {key: check_irrgular(vals) for key, vals in requires_grad_all.items()} devices_dd = collections.defaultdict(list) for status in layer_status: for key, val in status.devices.items(): devices_dd[key].extend(val) devices = {key: sorted(set(val)) for key, val in devices_dd.items()} adapter_model_status = TunerModelStatus( base_model_type=base_model_type, adapter_model_type=adapter_model_type, peft_types=peft_types, trainable_params=trainable_params, total_params=total_params, num_adapter_layers=num_adapter_layers, enabled=enabled, active_adapters=active_adapters, merged_adapters=merged_adapters, requires_grad=requires_grad, available_adapters=available_adapters, devices=devices, ) return adapter_model_status def __getattr__(name): if name == "PEFT_TYPE_TO_MODEL_MAPPING": # This is for backwards compatibility: In #2282, PEFT_TYPE_TO_MODEL_MAPPING was removed as it was redundant with # PEFT_TYPE_TO_TUNER_MAPPING. However, third party code could still use this mapping, e.g.: # https://github.com/AutoGPTQ/AutoGPTQ/blob/6689349625de973b9ee3016c28c11f32acf7f02c/auto_gptq/utils/peft_utils.py#L8 # TODO: Remove after 2026-01 msg = ( "PEFT_TYPE_TO_MODEL_MAPPING is deprecated, please use `from peft import PEFT_TYPE_TO_TUNER_MAPPING` instead. " "The deprecated variable will be removed in 2026." ) warnings.warn(msg, category=DeprecationWarning) return PEFT_TYPE_TO_TUNER_MAPPING raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
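# --- Illustrative usage sketch (not part of the original file; assumptions noted below) ---
# Shows how the status helpers above are typically called. The checkpoint name ("gpt2")
# and the LoRA settings are assumptions chosen purely for illustration.
def _example_inspect_adapter_status():  # hypothetical helper, not part of the PEFT API
    from transformers import AutoModelForCausalLM

    from peft import LoraConfig, get_peft_model

    base = AutoModelForCausalLM.from_pretrained("gpt2")  # assumed example checkpoint
    peft_model = get_peft_model(base, LoraConfig(task_type="CAUSAL_LM"))

    # Per-layer view: one TunerLayerStatus entry per adapter layer.
    for status in get_layer_status(peft_model)[:3]:
        print(status.name, status.active_adapters, status.requires_grad, status.devices)

    # Aggregated view: a single TunerModelStatus for the whole model.
    model_status = get_model_status(peft_model)
    print(model_status.trainable_params, model_status.total_params, model_status.enabled)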
peft/src/peft/peft_model.py/0
{ "file_path": "peft/src/peft/peft_model.py", "repo_id": "peft", "token_count": 72428 }
211
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The implementation is based on "Parameter-Efficient Orthogonal Finetuning # via Butterfly Factorization" (https://huggingface.co/papers/2311.06243) in ICLR 2024. from __future__ import annotations from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class BOFTConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`BOFTModel`]. Args: boft_block_size (`int`): BOFT block size across different layers. boft_block_num (`int`): Number of BOFT blocks per injected layer. boft_n_butterfly_factor (`int`): Number of butterfly factors across different layers. target_modules (`Union[List[str],str]`): The names of the modules to apply the adapter to. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. boft_dropout (`float`): The multiplicative dropout probability, by setting OFT blocks to identity during training, similar to the dropout layer in LoRA. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for BOFT. Can be 'none', 'all' or 'boft_only'. If 'all' or 'boft_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. modules_to_save (`List[str]`):List of modules apart from BOFT layers to be set as trainable and saved in the final checkpoint. layers_to_transform (`Union[List[int],int]`): The layer indexes to transform, if this argument is specified, it will apply the BOFT transformations on the layer indexes that are specified in this list. If a single integer is passed, it will apply the BOFT transformations on the layer at this index. layers_pattern (`Optional[Union[List[str], str]]`): The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`. 
""" boft_block_size: int = field( default=4, metadata={ "help": "BOFT block size across different layers.", "note": "You can only specify either boft_block_size or boft_block_num, but not both simultaneously, because boft_block_size x boft_block_num = layer dimension.", }, ) boft_block_num: int = field( default=0, metadata={ "help": "Number of BOFT blocks per injected layer.", "note": "You can only specify either boft_block_size or boft_block_num, but not both simultaneously, because boft_block_size x boft_block_num = layer dimension.", }, ) boft_n_butterfly_factor: int = field( default=1, metadata={ "help": "Number of butterfly factors.", "note": ( "for example, boft_n_butterfly_factor=2, the effective block size of OFT becomes twice as big and the number of blocks become half.", "note: for boft_n_butterfly_factor=1, BOFT is the same as vanilla OFT.", ), }, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with BOFT.", "example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ", }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from BOFT."}, ) boft_dropout: float = field( default=0.0, metadata={ "help": "BOFT multiplicative dropout, randomly setting blocks of OFT to be identity matrix, similar to the dropout layer in LoRA." }, ) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: str = field(default="none", metadata={"help": "Bias type for BOFT. Can be 'none', 'all' or 'boft_only'"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from BOFT layers to be set as trainable and saved in the final checkpoint. ", "note": ( "For example, in Sequence Classification or Token Classification tasks, ", "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.", ), }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the BOFT layers with their default initialization. Don't change ", "this setting, except if you know exactly what you're doing.", ), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. " "This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`." 
}, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.BOFT self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ") if self.boft_block_size == 0 and self.boft_block_num == 0: raise ValueError( f"Either `boft_block_size` or `boft_block_num` must be non-zero. Currently, boft_block_size = {self.boft_block_size} and boft_block_num = {self.boft_block_num}." ) if not (self.boft_block_size != 0) ^ (self.boft_block_num != 0): raise ValueError( f"You can only specify either boft_block_size ({self.boft_block_size}) or boft_block_num ({self.boft_block_num}), but not both simultaneously, because boft_block_size x boft_block_num == in_features." )
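# --- Illustrative usage sketch (not part of the original file; assumptions noted below) ---
# Shows how BOFTConfig is typically combined with get_peft_model. The base checkpoint
# ("t5-small") and the target module names are assumptions for illustration only.
def _example_boft_usage():  # hypothetical helper, not part of the PEFT API
    from transformers import AutoModelForSeq2SeqLM

    from peft import get_peft_model

    config = BOFTConfig(
        boft_block_size=4,          # mutually exclusive with boft_block_num
        target_modules=["q", "v"],  # assumed attention projection names for T5
        boft_dropout=0.1,
        bias="boft_only",
    )
    base = AutoModelForSeq2SeqLM.from_pretrained("t5-small")  # assumed example checkpoint
    peft_model = get_peft_model(base, config)
    peft_model.print_trainable_parameters()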
peft/src/peft/tuners/boft/config.py/0
{ "file_path": "peft/src/peft/tuners/boft/config.py", "repo_id": "peft", "token_count": 3160 }
212
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Literal, Optional from peft.config import PromptLearningConfig from peft.utils import PeftType, TaskType @dataclass class CPTConfig(PromptLearningConfig): """ CPT Configuration class extending PeftConfig for Context-aware Prompt Tuning (CPT). This class introduces additional parameters required for CPT, such as: - Token type masks - Prompt tuning initialization - Loss weighting - Projection settings For more details, see the paper: https://huggingface.co/papers/2410.17222 """ # Token-related configurations cpt_token_ids: Optional[list[int]] = field( default=None, metadata={"help": "Tensor of token IDs used for CPT prompts."} ) cpt_mask: Optional[list[int]] = field(default=None, metadata={"help": "Tensor mask applied to CPT tokens."}) cpt_tokens_type_mask: Optional[list[int]] = field( default=None, metadata={"help": "Mask indicating the type of each CPT token."} ) # Loss-related configurations opt_weighted_loss_type: Optional[Literal["none", "decay"]] = field( default="none", metadata={"help": "Type of weighted loss: 'none' or 'decay'."} ) opt_loss_decay_factor: Optional[float] = field( default=1.0, metadata={"help": "Factor for exponential decay in loss weighting."} ) # Projection-related configurations opt_projection_epsilon: Optional[float] = field( default=0.1, metadata={"help": "Epsilon value for input projection."} ) opt_projection_format_epsilon: Optional[float] = field( default=0.1, metadata={"help": "Epsilon value for format projection."} ) # Tokenizer configuration tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) # Neet to define CPT-specific static attributes is_prompt_learning = True # Indicates that CPT is a prompt-learning method. def __post_init__(self): """ Post-initialization hook to set additional attributes after the config is initialized. """ # CPT-specific static attributes self.is_prompt_learning = True # Indicates that CPT is a prompt-learning method. self.num_layers = None # Number of layers (optional, not always required). self.token_dim = None # Dimension of token embeddings. self.num_attention_heads = None # Number of attention heads (if applicable). self.num_transformer_submodules = 1 # Number of transformer submodules used. self.peft_type = PeftType.CPT # Specifies that the PEFT type is CPT. if self.task_type != TaskType.CAUSAL_LM: # TODO: adjust this to raise an error with PEFT v0.18.0 warnings.warn( f"{self.__class__.__name__} only supports task_type = {TaskType.CAUSAL_LM.value}, " "setting it automatically. This will raise an error starting from PEFT v0.18.0.", FutureWarning, ) self.task_type = TaskType.CAUSAL_LM # Ensures task type is causal language modeling. 
if self.cpt_token_ids is None: self.cpt_token_ids = [0] self.num_virtual_tokens = len(self.cpt_token_ids) if self.cpt_mask is None: self.cpt_mask = [1 for _ in self.cpt_token_ids] if self.cpt_tokens_type_mask is None: self.cpt_tokens_type_mask = [1 for _ in self.cpt_token_ids] if not ( len(self.cpt_token_ids) == len(self.cpt_mask) == len(self.cpt_tokens_type_mask) == self.num_virtual_tokens ): raise ValueError("cpt_token_ids, cpt_mask and cpt_tokens_type_mask must have the same length.")
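# --- Illustrative usage sketch (not part of the original file; assumptions noted below) ---
# Shows how a CPTConfig is typically built from a tokenized context template. The
# checkpoint ("gpt2"), the template text, and the loss settings are illustrative assumptions.
def _example_cpt_usage():  # hypothetical helper, not part of the PEFT API
    from transformers import AutoModelForCausalLM, AutoTokenizer

    from peft import get_peft_model

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumed example checkpoint
    context_ids = tokenizer("Classify the sentiment:", add_special_tokens=False)["input_ids"]

    config = CPTConfig(
        cpt_token_ids=context_ids,
        cpt_mask=[1] * len(context_ids),
        cpt_tokens_type_mask=[1] * len(context_ids),
        opt_weighted_loss_type="decay",
        opt_loss_decay_factor=0.95,
    )
    base = AutoModelForCausalLM.from_pretrained("gpt2")
    peft_model = get_peft_model(base, config)  # task_type is forced to CAUSAL_LM in __post_init__
    peft_model.print_trainable_parameters()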
peft/src/peft/tuners/cpt/config.py/0
{ "file_path": "peft/src/peft/tuners/cpt/config.py", "repo_id": "peft", "token_count": 1663 }
213
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class LNTuningConfig(PeftConfig): """ This is the configuration class to store the configuration of a :class:`~peft.tuners.LNTuningModel`. Args: target_modules (`Optional[Union[List[str], str]]`): List of module names or regex expression of the module names to replace with LNTuning. For example, '.*decoder.*' or '.*encoder.*'. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. modules_to_save (`Optional[Union[List[str], str]]`): List of modules to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved. """ target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with LNTuning. " "For example, '.*decoder.*' or '.*encoder.*'. " "If not specified, modules will be chosen according to the model architecture. If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." ), }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from LNTuning."}, ) modules_to_save: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "List of modules to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.LN_TUNING
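# --- Illustrative usage sketch (not part of the original file; assumptions noted below) ---
# Shows a typical LNTuningConfig that targets LayerNorm modules by (suffix) name. The
# checkpoint ("gpt2") and the module names ln_1/ln_2/ln_f are assumptions for GPT-2.
def _example_ln_tuning_usage():  # hypothetical helper, not part of the PEFT API
    from transformers import AutoModelForCausalLM

    from peft import get_peft_model

    config = LNTuningConfig(
        task_type="CAUSAL_LM",
        target_modules=["ln_1", "ln_2", "ln_f"],  # assumed LayerNorm names in GPT-2
    )
    base = AutoModelForCausalLM.from_pretrained("gpt2")  # assumed example checkpoint
    peft_model = get_peft_model(base, config)
    peft_model.print_trainable_parameters()  # only the targeted LayerNorm weights are trainable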
peft/src/peft/tuners/ln_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/ln_tuning/config.py", "repo_id": "peft", "token_count": 1153 }
214
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Reference code: https://github.com/iboing/CorDA/blob/main/cordalib/decomposition.py # Reference paper: https://huggingface.co/papers/2406.05223 import os from collections.abc import Iterable from typing import Any, Callable, Optional import torch import torch.nn as nn from attr import dataclass from tqdm import tqdm from peft.tuners.lora.config import LoraConfig from peft.tuners.lora.model import LoraModel from peft.utils.other import get_pattern_key @dataclass class CordaEigens: S_WC: torch.Tensor U_WC: torch.Tensor V_WC: torch.Tensor def target_modules(model: nn.Module, config: LoraConfig) -> Iterable[nn.Module]: """ Iterate over CorDA target name and modules of a model. A module is a target if its name is in `config.target_modules` and is `nn.Linear`. """ for name, module in model.named_modules(): if LoraModel._check_target_module_exists(config, name) and isinstance(module, nn.Linear): yield name, module def get_model_device(model: nn.Module) -> str: if hasattr(model, "module"): # Handle DeepSpeed/DataParallel model = model.module return next(iter(model.parameters())).device.type @torch.no_grad() def preprocess_corda( model: nn.Module, lora_config: LoraConfig, run_model: Optional[Callable[[], None]] = None, hooked_model: Optional[nn.Module] = None, ): """ Build necessary CorDA fields for a model. For each `M * N` linear layer, a `M * M` covariance matrix will be built temporarily during the preprocessing process, consuming roughly another `2 * MODEL_SIZE` memory for typical LLMs if model weight is FP16 and covariance is FP32. If that's too much, consider specifying `use_float16_for_covariance` in `lora_config.corda_config`. Args: model (`nn.Module`): Model to preprocess. lora_config (`LoraConfig`): Lora configuration of the model. `lora_config.corda_config` should be set. run_model (`Optional[Callable[[], None]]`): Callback to run the model when building covariance. Typically you should run model inference on your sample dataset in this callback. Experiments have shown that when token count per sample is 2048, hidden dimension is 4096, collecting 256 distinct samples is enough. If you collect too few or too repetitive samples, the covariance matrix may be low-ranked and unstabilize preprocessing. You can estimate sample count as `HIDDEN_DIM / TOKEN_PER_SAMPLE * 128`. `run_model` can be `None` only if covariance file in `lora_config.corda_config` is already created. hooked_model (`Optional[nn.Module]`): Model to hook when building covariance. If none, original model will be hooked. This is only useful when you want to hook a different model than the one you are training, typically you should leave this `None`. Upon completion, the following fields are set for each target module: eigens.S_WC (`torch.Tensor`): Singular values of the weight matrix. eigens.U_WC (`torch.Tensor`): Left singular vectors of the weight matrix. 
eigens.V_WC (`torch.Tensor`): Right singular vectors of the weight matrix, multiplied by inverse of covariance matrix. """ cache_file = lora_config.corda_config.cache_file covariance_file = lora_config.corda_config.covariance_file corda_method = lora_config.corda_config.corda_method verbose = lora_config.corda_config.verbose prune_temporary_fields = lora_config.corda_config.prune_temporary_fields # If cache exists, skip building if cache_file is not None and os.path.exists(cache_file) and os.path.getsize(cache_file) > 0: cache = torch.load(cache_file, map_location=get_model_device(model)) for name, module in target_modules(model, lora_config): module.eigens = CordaEigens( S_WC=cache[f"{name}.eigens.S_WC"], U_WC=cache[f"{name}.eigens.U_WC"], V_WC=cache[f"{name}.eigens.V_WC"], ) else: # Specify CorDA method for each layer if corda_method is None: raise ValueError("corda_method is required when cache_file is not provided.") for name, module in target_modules(model, lora_config): module.corda_method = corda_method # Specify CorDA rank for each layer for name, module in target_modules(model, lora_config): r_key = get_pattern_key(lora_config.rank_pattern.keys(), name) module.rank = lora_config.rank_pattern.get(r_key, lora_config.r) # Calculate covariance matrix calib_cov_distribution(model, lora_config, run_model, hooked_model, covariance_file) # Calculate eigens collect_eigens(model, lora_config, verbose) # Crop CorDA eigens so that there's less to save crop_corda_eigens(model, lora_config) # Remove redundant fields if exist if prune_temporary_fields: for name, module in target_modules(model, lora_config): if hasattr(module, "sample_count"): del module.sample_count if hasattr(module, "covariance_matrix"): del module.covariance_matrix if hasattr(module, "corda_method"): del module.corda_method if hasattr(module, "rank"): del module.rank # Save cache to disk if cache_file is not None: cache: dict[str, Any] = {} for name, module in target_modules(model, lora_config): cache[f"{name}.eigens.S_WC"] = module.eigens.S_WC cache[f"{name}.eigens.U_WC"] = module.eigens.U_WC cache[f"{name}.eigens.V_WC"] = module.eigens.V_WC os.makedirs(os.path.dirname(cache_file), exist_ok=True) torch.save(cache, cache_file) @torch.no_grad() def calib_cov_distribution( model: nn.Module, config: LoraConfig, run_model: Optional[Callable[[], None]], hooked_model: Optional[nn.Module], covariance_file: Optional[str], ): if covariance_file is not None and os.path.exists(covariance_file) and os.path.getsize(covariance_file) > 0: all_covariance_matrix = torch.load(covariance_file, map_location=get_model_device(model)) for name, module in target_modules(model, config): module.covariance_matrix = all_covariance_matrix[name] return if run_model is None: raise ValueError("run_model must be specified when covariance file and cache file aren't built.") if hooked_model is None: hooked_model = model hooked_model.eval() def hook(module, input, output): input = input[0].detach().squeeze(0).data ## (context_length = 2048, dim) if not config.corda_config.use_float16_for_covariance: input = input.float() input = input / torch.max(input).abs() # check if input is valid if torch.isnan(input).any() or torch.isinf(input).any(): raise ValueError("Invalid value found in input, please check your input data.") # calculate covariance and check if it's valid covariance = input.t().matmul(input) if torch.isnan(covariance).any() or torch.isinf(covariance).any(): raise ValueError( "Invalid value found in covariance. 
Please file an issue at https://github.com/huggingface/peft/issues." ) # add to module module.sample_count += 1 module.covariance_matrix += covariance # free memory del covariance, input handles = [] for name, module in target_modules(hooked_model, config): module.sample_count = 0 module.covariance_matrix = 0 handles.append(module.register_forward_hook(hook)) run_model() # Clear the hooks for handle in handles: handle.remove() # In some edge cases you might need to hook a model different from the model to add adapters, # in this case you would specify `hooked_model` and set it to a different model from `model`. if hooked_model is not model: targets = {} for name, module in target_modules(model, config): targets[name] = module for name, module in target_modules(hooked_model, config): # There can be modules used only in inference, but not training # Exclude modules not in target model to prevent KeyError in this case if name in targets: targets[name].sample_count = module.sample_count targets[name].covariance_matrix = module.covariance_matrix # Divide by sample count for name, module in target_modules(model, config): module.covariance_matrix /= module.sample_count # Save covariance to disk if covariance_file is not None: all_covariance_matrix = {} for name, module in target_modules(model, config): all_covariance_matrix[name] = module.covariance_matrix os.makedirs(os.path.dirname(covariance_file), exist_ok=True) torch.save(all_covariance_matrix, covariance_file) @torch.no_grad() def collect_eigens( model: nn.Module, config: LoraConfig, verbose: bool, ): """Call collect_eigens_for_layer and store result in key `eigens` of each layer.""" linear_modules = [] for name, module in target_modules(model, config): linear_modules.append((name, module)) if verbose: linear_modules = tqdm(linear_modules, desc="Collecting eigens") for name, module in linear_modules: module.eigens = collect_eigens_for_layer(module, config) @torch.no_grad() def collect_eigens_for_layer( linear: nn.Linear, config: LoraConfig, ) -> CordaEigens: w = linear.weight.data.float() out_dim = w.size(0) in_dim = w.size(1) min_dim = min(in_dim, out_dim) if not hasattr(linear, "covariance_matrix"): raise ValueError( "Covariance matrix not found in linear module. Please do not call this function directly, " "instead call `preprocess_corda`. If your usage is correct but you still encounter this error, " "please file an issue at https://github.com/huggingface/peft/issues." ) covariance_matrix = linear.covariance_matrix.float() damp = 0.01 while True: compensate = torch.diag( torch.ones(covariance_matrix.size(0)).to(covariance_matrix.device) * torch.mean(torch.diag(covariance_matrix)) * damp ) fix_covariance_matrix = covariance_matrix + compensate cov_inv = torch.linalg.inv(fix_covariance_matrix) inv_error = torch.dist( fix_covariance_matrix @ cov_inv, torch.eye(covariance_matrix.size(0)).to(get_model_device(linear)) ).item() if inv_error < 0.05: break else: damp = damp * 2 w = w @ fix_covariance_matrix ## w: out_dim, in_dim; covariance_matrix: in_dim, in_dim U, S, Vh = torch.linalg.svd(w, full_matrices=False) V = (Vh @ cov_inv).transpose(0, 1) # Sanity check, temporarily U and V are large, they will be cropped after rank search r = min_dim if U.size(0) != out_dim or U.size(1) != r: raise ValueError( f"Matrix U size mismatch: {U.size()} vs. ({out_dim}, {r}), " "please file an issue at https://github.com/huggingface/peft/issues." ) if S.size(0) != r: raise ValueError( f"Matrix S size mismatch: {S.size()} vs. 
({r},), " "please file an issue at https://github.com/huggingface/peft/issues." ) if V.size(0) != in_dim or V.size(1) != r: raise ValueError( f"Matrix V size mismatch: {V.size()} vs. ({in_dim}, {r}), " "please file an issue at https://github.com/huggingface/peft/issues." ) # Offload U and V to CPU, they consume too much memory U = U.cpu() V = V.cpu() return CordaEigens( S_WC=S, U_WC=U, V_WC=V, ) @torch.no_grad() def crop_corda_eigens(model: nn.Module, config: LoraConfig): for name, module in target_modules(model, config): # We don't expect saving sliced tensor writes the whole tensor to disk, # so it's necessary to copy the tensors. # Reference: https://github.com/pytorch/pytorch/issues/40157 if module.corda_method == "ipm": module.eigens.S_WC = module.eigens.S_WC[: module.rank].clone() module.eigens.U_WC = module.eigens.U_WC[:, : module.rank].clone().to(get_model_device(model)) module.eigens.V_WC = module.eigens.V_WC[:, : module.rank].clone().to(get_model_device(model)) elif module.corda_method == "kpm": module.eigens.S_WC = module.eigens.S_WC[-module.rank :].clone() module.eigens.U_WC = module.eigens.U_WC[:, -module.rank :].clone().to(get_model_device(model)) module.eigens.V_WC = module.eigens.V_WC[:, -module.rank :].clone().to(get_model_device(model)) else: raise ValueError(f"Invalid corda_method found: {module.corda_method}, it should be 'ipm' or 'kpm'.") # Sanity check if module.eigens.S_WC.size(0) != module.rank: raise ValueError( f"rank mismatch: {module.eigens.S_WC.size(0)} vs. {module.rank}," "please file an issue at https://github.com/huggingface/peft/issues." ) if module.eigens.U_WC.size(0) != module.weight.size(0): raise ValueError( f"U size mismatch: {module.eigens.U_WC.size(0)} vs. {module.weight.size(0)}," "please file an issue at https://github.com/huggingface/peft/issues." ) if module.eigens.U_WC.size(1) != module.rank: raise ValueError( f"U size mismatch: {module.eigens.U_WC.size(1)} vs. {module.rank}," "please file an issue at https://github.com/huggingface/peft/issues." ) if module.eigens.V_WC.size(0) != module.weight.size(1): raise ValueError( f"V size mismatch: {module.eigens.V_WC.size(0)} vs. {module.weight.size(1)}," "please file an issue at https://github.com/huggingface/peft/issues." ) if module.eigens.V_WC.size(1) != module.rank: raise ValueError( f"V size mismatch: {module.eigens.V_WC.size(1)} vs. {module.rank}," "please file an issue at https://github.com/huggingface/peft/issues." )
peft/src/peft/tuners/lora/corda.py/0
{ "file_path": "peft/src/peft/tuners/lora/corda.py", "repo_id": "peft", "token_count": 6533 }
215
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import asdict from enum import Enum from typing import Optional import torch from torch import nn from tqdm import tqdm from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import ( BaseTuner, BaseTunerLayer, check_target_module_exists, onload_layer, ) from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules, get_quantization_config, ) from .aqlm import dispatch_aqlm from .awq import dispatch_awq from .config import OFTConfig from .eetq import dispatch_eetq from .gptq import dispatch_gptq from .hqq import dispatch_hqq from .inc import dispatch_inc from .layer import OFTLayer, dispatch_default class OFTModel(BaseTuner): """ Creates Orthogonal Finetuning model from a pretrained model. The method is described in https://huggingface.co/papers/2306.07280 Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`OFTConfig`]): The configuration of the OFT model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The OFT model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import OFTModel, OFTConfig >>> config_te = OFTConfig( ... r=8, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = OFTConfig( ... r=8, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... module_dropout=0.0, ... init_weights=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = OFTModel(model.text_encoder, config_te, "default") >>> model.unet = OFTModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`OFTConfig`]): The configuration of the OFT model. """ prefix: str = "oft_" def _check_new_adapter_config(self, config: OFTConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." 
) @staticmethod def _check_target_module_exists(oft_config, key): return check_target_module_exists(oft_config, key) def _create_and_replace( self, oft_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") kwargs = { "r": oft_config.r, "oft_block_size": oft_config.oft_block_size, "module_dropout": oft_config.module_dropout, "coft": oft_config.coft, "eps": oft_config.eps, "block_share": oft_config.block_share, "use_cayley_neumann": oft_config.use_cayley_neumann, "num_cayley_neumann_terms": oft_config.num_cayley_neumann_terms, "fan_in_fan_out": oft_config.fan_in_fan_out, "init_weights": oft_config.init_weights, "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), } quant_methods = ["gptq", "aqlm", "awq"] for quant_method in quant_methods: quantization_config = get_quantization_config(self.model, method=quant_method) if quantization_config is not None: kwargs[f"{quant_method}_quantization_config"] = quantization_config # If it is not a OFTLayer, create a new module, else update it with new adapters if not isinstance(target, OFTLayer): device_map = self.model.hf_device_map if hasattr(self.model, "hf_device_map") else None new_module = self._create_new_module(oft_config, adapter_name, target, device_map=device_map, **kwargs) if adapter_name not in self.active_adapters: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) else: target.update_layer( adapter_name, r=oft_config.r, oft_block_size=oft_config.oft_block_size, module_dropout=oft_config.module_dropout, coft=oft_config.coft, eps=oft_config.eps, block_share=oft_config.block_share, use_cayley_neumann=oft_config.use_cayley_neumann, num_cayley_neumann_terms=oft_config.num_cayley_neumann_terms, init_weights=oft_config.init_weights, ) def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer meta = torch.device("meta") # dispatch to correct device for name, module in new_module.named_modules(): if (self.prefix in name) or ("ranknum" in name): if hasattr(child, "qweight"): weight = child.qweight elif hasattr(child, "W_q"): weight = child.W_q elif hasattr(child, "weight"): weight = child.weight elif getattr(child, "in_proj_weight", None) is not None: # MHA weight = child.in_proj_weight else: weight = next(child.parameters()) if not any(p.device == meta for p in module.parameters()): module.to(weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = self.peft_config[active_adapter].bias if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "oft_only": for name, m in model.named_modules(): if isinstance(m, OFTLayer) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(oft_config, adapter_name, target, **kwargs): # 
Collect dispatcher functions to decide what backend to use for the replaced OFT layer. The order matters, # because the first match is always used. Therefore, the default layers should be checked last. dispatchers = [] # avoid eager bnb import if is_bnb_available(): from .bnb import dispatch_bnb_8bit dispatchers.append(dispatch_bnb_8bit) if is_bnb_4bit_available(): from .bnb import dispatch_bnb_4bit dispatchers.append(dispatch_bnb_4bit) dispatchers.extend( [ dispatch_eetq, dispatch_aqlm, dispatch_awq, dispatch_gptq, dispatch_hqq, dispatch_inc, dispatch_default, ] ) new_module = None for dispatcher in dispatchers: new_module = dispatcher(target, adapter_name, oft_config=oft_config, **kwargs) if new_module is not None: # first match wins break if new_module is None: # no module could be matched raise ValueError( f"Target module {target} is not supported. Currently, only the following modules are supported: " "`torch.nn.Linear`, `torch.nn.Conv2d`." ) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name): """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, OFTLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name def _check_merge_allowed(self): """Verify that the configuration supports merging. Currently gptq quantization and replicated layers do not support merging. 
""" super()._check_merge_allowed() if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge OFT layers when the model is gptq quantized") if self.peft_config.get("layer_replication"): raise ValueError("Cannot merge OFT layers when base model layers are replicated") @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): if merge: self._check_merge_allowed() key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue with onload_layer(target): if hasattr(target, "unload_and_optionally_merge_module"): # if layers have special unloading method, like MultiheadAttention, use that unloaded_module = target.unload_and_optionally_merge_module( merge=merge, safe_merge=safe_merge, adapter_names=adapter_names ) self._replace_module(parent, target_name, unloaded_module, target) elif hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) return self.model def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, OFTLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] self._delete_auxiliary_adapter(adapter_name, new_active_adapters=new_adapter) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> torch.nn.Module: r""" This method merges the OFT layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the oft modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False)
peft/src/peft/tuners/oft/model.py/0
{ "file_path": "peft/src/peft/tuners/oft/model.py", "repo_id": "peft", "token_count": 7891 }
216
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Optional import bitsandbytes as bnb import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight from peft.utils.other import transpose from .layer import RandLoraLayer, UniqueBaseGrad if is_bnb_available(): class Linear8bitLt(torch.nn.Module, RandLoraLayer): def __init__( self, base_layer: torch.nn.Module, adapter_name: str, randlora_A, randlora_B, r: int = 0, randlora_alpha: int = 0, randlora_dropout: float = 0.0, fan_in_fan_out: bool = False, init_weights: bool = True, **kwargs, ) -> None: super().__init__() RandLoraLayer.__init__(self, base_layer) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer( adapter_name, randlora_A, randlora_B, r, randlora_alpha=randlora_alpha, randlora_dropout=randlora_dropout, init_weights=init_weights, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: return for active_adapter in adapter_names: if active_adapter not in self.randlora_lambda.keys(): continue warnings.warn( "Merge RandLora module to 8-bit linear may get different generations due to rounding errors." ) randlora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB output = dequantize_bnb_weight(weight, state) w_data = output.to(randlora_data.dtype).to(randlora_data.device) + randlora_data if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.randlora_lambda.keys(): continue warnings.warn( "Unmerge randlora module to 8-bit linear may get different generations due to rounding errors." ) randlora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB output = dequantize_bnb_weight(weight, state=state) w_data = output.to(randlora_data.dtype).to(randlora_data.device) - randlora_data self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() def get_scaled_bases(self, adapter, device=None) -> list[torch.Tensor, torch.Tensor]: """ Performs scaling on the smallest random base (randlora_A) and returns randlora_A and randlora_B in the correct order to fit the target layers' dimensions Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ randlora_A = self.randlora_A[adapter] randlora_B = self.randlora_B[adapter] if device is None: device = randlora_B.device dtype = randlora_B.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) randlora_lambda = self.randlora_lambda[adapter].to(device) randlora_gamma = self.randlora_gamma[adapter].to(device) if cast_to_fp32: randlora_A = randlora_A.float() randlora_B = randlora_B.float() randlora_lambda = randlora_lambda.float() randlora_gamma = randlora_gamma.float() # The trainable parameters are always applied to randlora_A, the smallest basis. min_dim, max_dim = min(self.out_features, self.in_features), max(self.out_features, self.in_features) # As adapted layers may have different shapes and RandLora contains a single shared pair of A and B matrices, # we initialize these matrices with the largest required size for each dimension. # During the forward pass, required submatrices are sliced out from the shared randlora_A and randlora_B. sliced_A = randlora_A[:, : self.num_bases, :min_dim].to(device) sliced_B = randlora_B[:max_dim, : self.num_bases, :].to(device) # Flattening the matrices over the rank and number of bases dimensions is more memory efficient update_B = sliced_B.flatten(start_dim=1) update_A = UniqueBaseGrad.apply(sliced_A, randlora_lambda, randlora_gamma).flatten(end_dim=1) if min_dim == self.in_features: return update_A, update_B return update_B.T, update_A.T def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ update_B, update_A = self.get_scaled_bases(adapter) update = update_B @ update_A output_tensor = transpose(update, self.fan_in_fan_out) scaling = self.scaling[adapter] return output_tensor * scaling def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: """ Perform the forward pass using the RandLora adapter. Args: x (torch.Tensor): Input tensor. Returns: torch.Tensor: Output tensor after applying the RandLora adaptation. Note: This method implements the RandLora-specific forward pass. 
It applies the shared projections (randlora_A and randlora_B) along with the per-layer trainable parameters (lambda and gamma) to compute the adapter output. """ if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.randlora_lambda.keys(): continue update_B, update_A = self.get_scaled_bases(active_adapter, device=x.device) requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = update_A.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) dropout = self.randlora_dropout[active_adapter] x_temp = dropout(x.to(update_A.dtype)) adapter_output = torch.nn.functional.linear(torch.nn.functional.linear(x_temp, update_B), update_A) if requires_conversion: adapter_output = adapter_output.to(expected_dtype) scaling = self.scaling[active_adapter] result = result + adapter_output * scaling # Ensure the output tensor has the same dtype as the input tensor return result.to(x.dtype) def __repr__(self) -> str: rep = super().__repr__() return "randlora." + rep if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, RandLoraLayer): def __init__( self, base_layer: torch.nn.Module, adapter_name: str, randlora_A, randlora_B, r: int = 0, randlora_alpha: int = 0, randlora_dropout: float = 0.0, fan_in_fan_out: bool = False, init_weights: bool = True, **kwargs, ) -> None: super().__init__() RandLoraLayer.__init__(self, base_layer) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer( adapter_name, randlora_A, randlora_B, r, randlora_alpha=randlora_alpha, randlora_dropout=randlora_dropout, init_weights=init_weights, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: return for active_adapter in adapter_names: if active_adapter not in self.randlora_lambda.keys(): continue warnings.warn( "Merge RandLora module to 4-bit linear may get different generations due to rounding errors." ) randlora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight kwargs = weight.__dict__ w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) + randlora_data if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( weight.device ) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.randlora_lambda.keys(): continue warnings.warn( "Unmerge RandLora module to 4-bit linear may get different generations due to rounding errors." ) randlora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight kwargs = weight.__dict__ w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) - randlora_data self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( weight.device ) def get_scaled_bases(self, adapter, device=None) -> list[torch.Tensor, torch.Tensor]: """ Performs scaling on the smallest random base (randlora_A) and returns randlora_A and randlora_B in the correct order to fit the target layers' dimensions Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ randlora_A = self.randlora_A[adapter] randlora_B = self.randlora_B[adapter] if device is None: device = randlora_B.device dtype = randlora_B.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) randlora_lambda = self.randlora_lambda[adapter].to(device) randlora_gamma = self.randlora_gamma[adapter].to(device) if cast_to_fp32: randlora_A = randlora_A.float() randlora_B = randlora_B.float() randlora_lambda = randlora_lambda.float() randlora_gamma = randlora_gamma.float() # The trainable parameters are always applied to randlora_A, the smallest basis. min_dim, max_dim = min(self.out_features, self.in_features), max(self.out_features, self.in_features) # As adapted layers may have different shapes and RandLora contains a single shared pair of A and B matrices, # we initialize these matrices with the largest required size for each dimension. # During the forward pass, required submatrices are sliced out from the shared randlora_A and randlora_B. sliced_A = randlora_A[:, : self.num_bases, :min_dim].to(device) sliced_B = randlora_B[:max_dim, : self.num_bases, :].to(device) # Flattening the matrices over the rank and number of bases dimensions is more memory efficient update_B = sliced_B.flatten(start_dim=1) update_A = UniqueBaseGrad.apply(sliced_A, randlora_lambda, randlora_gamma).flatten(end_dim=1) if min_dim == self.in_features: return update_A, update_B return update_B.T, update_A.T def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. 
""" update_B, update_A = self.get_scaled_bases(adapter) update = update_B @ update_A output_tensor = transpose(update, self.fan_in_fan_out) scaling = self.scaling[adapter] return output_tensor * scaling def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result = result.clone() for active_adapter in self.active_adapters: if active_adapter not in self.randlora_lambda.keys(): continue update_B, update_A = self.get_scaled_bases(active_adapter, device=x.device) requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = update_A.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) dropout = self.randlora_dropout[active_adapter] x_temp = dropout(x.to(update_A.dtype)) adapter_output = torch.nn.functional.linear(torch.nn.functional.linear(x_temp, update_B), update_A) if requires_conversion: adapter_output = adapter_output.to(expected_dtype) scaling = self.scaling[active_adapter] result = result + adapter_output * scaling # Ensure the output tensor has the same dtype as the input tensor return result.to(x.dtype) def __repr__(self) -> str: rep = super().__repr__() return "randlora." + rep
peft/src/peft/tuners/randlora/bnb.py/0
{ "file_path": "peft/src/peft/tuners/randlora/bnb.py", "repo_id": "peft", "token_count": 9574 }
217
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Optional import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from peft.tuners._buffer_dict import BufferDict from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import check_deepspeed_zero3_enabled, gather_params_ctx class TrainableTokensLayer(nn.Module, BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("trainable_tokens_delta",) # All names of other parameters that may contain adapter-related parameters other_param_names = ("token_indices", "trainable_tokens_original") def __init__( self, base_layer: nn.Module, adapter_name: str, token_indices: list[int], tied_adapter: Optional[TrainableTokensLayer] = None, **kwargs, ) -> None: super().__init__() self.base_layer = base_layer self._active_adapter = adapter_name self.kwargs = kwargs # wrap the tied adapter in a list so that it is excluded from .(named_)modules() and, therefore, # not included in the state dict since it would be a copy of the tied adapter anyway. self._tied_adapter = [tied_adapter] if tied_adapter else [] # we store the updated weights of particular tokens and their originals. we assume # that the count of new tokens is far smaller than the number of total tokens. # # In case we have weight tying with another token adapter, we'll have no actual # references on our own but use everything from the tied adapter. if not self.tied_adapter: self.trainable_tokens_delta = nn.ParameterDict({}) self.trainable_tokens_original = BufferDict({}) self.token_indices = {} else: self.trainable_tokens_delta = self.tied_adapter.trainable_tokens_delta self.trainable_tokens_original = self.tied_adapter.trainable_tokens_original self.token_indices = self.tied_adapter.token_indices # Mark the weight as unmerged self.merged_adapters = [] @property def tied_adapter(self): if self._tied_adapter: return self._tied_adapter[0] return None def _collect_token_weights(self, weight: torch.Tensor, rows: torch.Tensor, embed_dim: int) -> torch.Tensor: """DeepSpeed zero3 specific code to initialize trainable tokens. Ensures that only the necessary weights are collected to a single rank, initialized, and then shared with all ranks. 
""" src_rank = 0 # right now, only CUDA is implemented device = torch.device("cuda", torch.cuda.current_device()) with gather_params_ctx([weight], modifier_rank=None): if dist.get_rank() == src_rank: token_weights = weight[rows].clone() else: # build an empty tensor with correct shape/type/device token_weights = torch.empty( (len(rows), embed_dim), dtype=weight.dtype, device=device, ) # share the weights with all ranks dist.broadcast(token_weights, src=src_rank) return token_weights def update_layer(self, adapter_name, **kwargs): if kwargs.get("tied_adapter", None): # as a tied adapter, we're just following whatever the adpater we're tied to does, we don't update anything. return self.token_indices[adapter_name] = kwargs["token_indices"] init_weights = kwargs.get("init_weights", True) # we initialize the delta embedding weights from the base embedding matrix and replace values instead of # adding/subtracting deltas. we do it this way and use `embedding.weight.index_copy()` to write the updated # values during `forward()` to avoid that the user resizing the embedding matrix, effectively filling the new # token space with random values, training the model with TrainableTokensLayer, initializing the model anew - # thus re-initializing the new embeddings again with new random variables. If we would add/subtract deltas # onto the new values, we would get undefined behavior. By replacing the specific token values we always # get defined behavior. weight = self.get_base_layer().weight embed_dim = self.get_base_layer().embedding_dim if init_weights: if check_deepspeed_zero3_enabled(): values = self._collect_token_weights(weight, self.token_indices[adapter_name], embed_dim) else: values = self.weight[self.token_indices[adapter_name]] else: # random init with matching dtype/device values = torch.randn( (len(self.token_indices[adapter_name]), embed_dim), dtype=weight.dtype, device=weight.device, ) self.trainable_tokens_delta[adapter_name] = nn.Parameter(values.clone(), requires_grad=True) self.trainable_tokens_original[adapter_name] = values.clone() self._move_adapter_to_device_of_base_layer(adapter_name) def _check_overlapping_tokens(self, adapter_names): """Raises an error if the token indices of the given adapter names are overlapping. This is currently not supported and can lead to undefined behavior of the model if no specific merging between the overlapping indices' values is applied. """ if len(adapter_names) <= 1: return indices = set() # we take already merged adapters into account as well since they can be overridden by new adapters as well. for adapter_name in set(adapter_names + self.merged_adapters): index_set = set(self.token_indices[adapter_name]) if len(indices.intersection(index_set)): raise ValueError( f"Token indices of adapter {adapter_name} are already defined and would result in " "undefined merging behavior. Only disjunct token indices are currently supported." 
) indices.update(index_set) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return self._check_overlapping_tokens(adapter_names) merged = self.base_layer.weight.data for adapter_name in adapter_names: index = torch.tensor(self.token_indices[adapter_name]).to(merged.device) deltas = self.trainable_tokens_delta[adapter_name].to(merged) merged = merged.index_copy(dim=0, index=index, source=deltas) if safe_merge and not torch.isfinite(merged).all(): raise ValueError(f"NaNs detected in the merged weights. The adapter {adapter_name} seems to be broken") self.base_layer.weight.data = merged self.merged_adapters.extend(adapter_names) def unmerge(self) -> None: if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: adapter_name = self.merged_adapters.pop() index = torch.tensor(self.token_indices[adapter_name]).to(self.base_layer.weight.device) originals = self.trainable_tokens_original[adapter_name].to(self.base_layer.weight) self.base_layer.weight.data.index_copy_(dim=0, index=index, source=originals) def get_merged_weights(self, active_adapters): W = self.base_layer.weight for adapter_name in active_adapters: index = torch.tensor(self.token_indices[adapter_name]).to(W.device) deltas = self.trainable_tokens_delta[adapter_name].to(W) W = W.index_copy(dim=0, index=index, source=deltas) return W def forward_adapters(self, x: torch.Tensor, active_adapters, *args, **kwargs) -> torch.Tensor: if self.disable_adapters or not active_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: self._check_overlapping_tokens(active_adapters) W = self.get_merged_weights(active_adapters) # Normally it should be very clear that we're wrapping Embedding layers but there are cases, such as # tying weights with an LM head where the layer we wrap is a Linear layer. Therefore we must choose # accordingly. # # TODO: the isinstance checks, especially the one for nn.Linear, may not hold for quantized layers; # TODO: we may need to find a better way to detect quantized layers. if isinstance(self.base_layer, torch.nn.Embedding): result = F.embedding( input=x, weight=W, padding_idx=self.base_layer.padding_idx, max_norm=self.base_layer.max_norm, norm_type=self.base_layer.norm_type, scale_grad_by_freq=self.base_layer.scale_grad_by_freq, sparse=self.base_layer.sparse, ) elif isinstance(self.base_layer, torch.nn.Linear): # Probably a tied adapter that wraps an LM head. result = F.linear( input=x, weight=W, ) else: raise ValueError( "TrainableTokensLayer wraps an unknown layer type, maybe you are targeting the wrong layer?" ) return result def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: return self.forward_adapters(x, self.active_adapters, *args, **kwargs)
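A short usage sketch for the layer above (not part of the PEFT source): training only a few newly added token embeddings while the rest of the embedding matrix stays frozen. It assumes `TrainableTokensConfig` is exported from `peft` and accepts `target_modules` and `token_indices`; the model, the added tokens and the module name "embed_tokens" are illustrative.

# Hypothetical sketch: only the rows listed in `token_indices` get trainable deltas,
# which forward_adapters() above writes over the base embedding rows via index_copy.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import TrainableTokensConfig, get_peft_model

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

# Suppose two new special tokens were added and the embedding matrix resized.
tokenizer.add_tokens(["<special-1>", "<special-2>"])
model.resize_token_embeddings(len(tokenizer))
new_ids = tokenizer.convert_tokens_to_ids(["<special-1>", "<special-2>"])

config = TrainableTokensConfig(
    target_modules=["embed_tokens"],  # embedding layer wrapped by TrainableTokensLayer
    token_indices=new_ids,            # only these rows receive trainable deltas
)
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()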
peft/src/peft/tuners/trainable_tokens/layer.py/0
{ "file_path": "peft/src/peft/tuners/trainable_tokens/layer.py", "repo_id": "peft", "token_count": 4476 }
218
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy from contextlib import contextmanager from functools import partial from typing import Optional, Union import torch import torch.nn as nn from peft.tuners.lora.layer import LoraLayer from peft.tuners.lora.model import LoraModel from peft.tuners.tuners_utils import BaseTuner from peft.utils.constants import DUMMY_TARGET_MODULES from peft.utils.save_and_load import set_peft_model_state_dict from .. import lora from .classifier import XLoraClassifier from .config import XLoraConfig from .layer import XLoraConv2dLayer, XLoraEmbeddingLayer, XLoraLinearLayer def convert_layers_to_xlora( base: nn.Module, # PeftModel xloramodel: nn.Module, # XLoraModel config: XLoraConfig, ) -> tuple[int, torch.device | None]: """ Returns the number of swapped layers. """ total_swapped = 0 all_layers = [] device = None for module in base.modules(): # Check the exact type because classes like OPTLearnedPositionalEmbedding inherit from nn.Embedding if isinstance(module, lora.Linear): device = module.lora_A[next(iter(module.lora_A))].weight.device new_layer = XLoraLinearLayer( model=xloramodel, target=module, target_forward=module.forward, layer_number=total_swapped, config=config, ) all_layers.append(new_layer) module.forward = new_layer.forward # type: ignore[method-assign] total_swapped += 1 elif isinstance(module, lora.Embedding): device = module.lora_embedding_A[next(iter(module.lora_embedding_A))].device new_layer = XLoraEmbeddingLayer( model=xloramodel, target=module, target_forward=module.forward, layer_number=total_swapped, config=config, ) all_layers.append(new_layer) module.forward = new_layer.forward # type: ignore[method-assign] total_swapped += 1 elif isinstance(module, lora.Conv2d): device = module.lora_A[next(iter(module.lora_A))].weight.device new_layer = XLoraConv2dLayer( model=xloramodel, target=module, target_forward=module.forward, layer_number=total_swapped, config=config, ) all_layers.append(new_layer) module.forward = new_layer.forward # type: ignore[method-assign] total_swapped += 1 return (total_swapped, device) def _load_adapter_into_lora_model( lora_model: LoraModel, adapter_name: str, model_id: str, torch_device: Optional[str] = None, ephemeral_gpu_offload: bool = False, autocast_adapter_dtype: bool = True, subfolder: Optional[str] = None, **kwargs, ): """ This method emulates the behavior of `PeftModel.from_pretrained`. Updates to `PeftModel.from_pretrained` may need to be reflected here. All params pertain to the adapter (adapter name, model id, `i` is the adapter number in 0 indexing). 
""" from peft.peft_model import PeftModel from peft.tuners.lora.config import LoraConfig from peft.utils.other import infer_device from peft.utils.save_and_load import load_peft_weights hf_hub_download_kwargs, kwargs = PeftModel._split_kwargs(kwargs) if torch_device is None: torch_device = infer_device() if adapter_name not in lora_model.peft_config: # load the config lora_peft_config = LoraConfig.from_pretrained( model_id, ephemeral_gpu_offload=ephemeral_gpu_offload, subfolder=subfolder, **hf_hub_download_kwargs, ) lora_peft_config.inference_mode = False lora_model.peft_config[adapter_name] = lora_peft_config lora_model.inject_adapter(lora_model.model, adapter_name) adapter_weights = load_peft_weights(model_id, device=torch_device, subfolder=subfolder, **hf_hub_download_kwargs) new_adapter_weights = {} # Rework the keys to contain the adapter numbers for old_key in adapter_weights.keys(): key: str = old_key # Remove all the prefixes until we have model.<...> while not (key.startswith("model.") and not key.startswith("model.model.")): key = key[key.find(".") + 1 :] # We always want model.model key = "model." + key new_adapter_weights[key] = adapter_weights[old_key] # load the weights into the model ignore_mismatched_sizes = kwargs.get("ignore_mismatched_sizes", False) load_result = set_peft_model_state_dict( lora_model, new_adapter_weights, adapter_name=adapter_name, ignore_mismatched_sizes=ignore_mismatched_sizes, ) if len(load_result.unexpected_keys) > 0: raise ValueError( f"Got unexpected keys! Please raise an issue and tag @EricLBuehler.\n\nunexpected_keys={load_result.unexpected_keys}" ) if hasattr(lora_model, "_cast_adapter_dtype"): lora_model._cast_adapter_dtype(adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype) class XLoraModel(BaseTuner): """ Creates an X-LoRA (Mixture of LoRA experts), model from a pretrained transformers model. Currently, this X-LoRA implementation only works with models with a transformer architecture. The method is described in detail in https://huggingface.co/papers/2402.07148. Args: model ([`torch.nn.Module`]): The model to be adapted. config ([`XLoraConfig`]): The configuration of the Lora model. adapter_name (`str`): The name of the adapter, does not affect the LoRA adapter names. Returns: `torch.nn.Module`: The X-LoRA model. Example: ```py >>> from transformers import AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training >>> model_config = AutoConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") >>> config = XLoraConfig( ... task_type="CAUSAL_LM", ... hidden_size=model_config.hidden_size, ... xlora_depth=4, ... adapters={ ... "adapter_1": "./path/to/the/checkpoint/", ... "adapter_2": "./path/to/the/checkpoint/", ... "adapter_n": "./path/to/the/checkpoint/", ... }, ... ) >>> int8_config = BitsAndBytesConfig(load_in_8bit=True) >>> model = AutoModelForCausalLM.from_pretrained( ... "mistralai/Mistral-7B-Instruct-v0.1", ... trust_remote_code=True, ... attn_implementation="flash_attention_2", ... device_map="cuda:0", ... torch_dtype=torch.bfloat16, ... quantization_config=int8_config, ... 
) >>> model = prepare_model_for_kbit_training(4) >>> xlora_model = get_peft_model(model, config) ``` """ def __init__( self, model: nn.Module, config: Union[dict[str, XLoraConfig], XLoraConfig], adapter_name: str, torch_device: Optional[str] = None, ephemeral_gpu_offload: bool = False, autocast_adapter_dtype: bool = True, **kwargs, ) -> None: """ Create a new X-LoRA model Args: model (`nn.Module`): Base model to apply X-LoRA to. config: ([`XLoraConfig`]): X-LoRA configuration object. adapter_name: (`str`): Adapter name for the X-LoRA adapter. torch_device (`str`, *optional*, defaults to None): (For loading the LoRA adapters) The device to load the adapter on. If `None`, the device will be inferred. ephemeral_gpu_offload (`bool`, *optional*, defaults to `False`): (For loading the LoRA adapters) Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`. autocast_adapter_dtype (`bool`, *optional*, defaults to `True`): (For loading the LoRA adapters) Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. kwargs: (`optional`): (For loading the LoRA adapters) Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. """ nn.Module.__init__(self) if isinstance(config, dict): conf = config[adapter_name] else: conf = config # Create an empty LoraModel base_lora_config = copy.copy(conf) base_lora_config.target_modules = DUMMY_TARGET_MODULES # Imitate a LoraConfig, fields might need to be updated if LoraConfig is updated base_lora_config.layer_replication = None base_lora_config.bias = "none" lora_model = LoraModel(model, base_lora_config, adapter_name) self.xlora_config = conf self.lora_model = lora_model peft_config = conf if hasattr(model.config, "use_cache") and model.config.use_cache: raise ValueError("`use_cache` must be False") adapters_items = peft_config.adapters.items() if hasattr(self.xlora_config, "_subfolders"): adapters_items = zip(peft_config.adapters.items(), self.xlora_config._subfolders) else: adapters_items = peft_config.adapters.items() if hasattr(self.xlora_config, "_subfolders"): for i, (_adapter_name, model_id), subfolder in enumerate(adapters_items): _load_adapter_into_lora_model( lora_model=self.lora_model, adapter_name=str(i), model_id=model_id, torch_device=torch_device, ephemeral_gpu_offload=ephemeral_gpu_offload, autocast_adapter_dtype=autocast_adapter_dtype, subfolder=subfolder, **kwargs, ) else: for i, (_adapter_name, model_id) in enumerate(adapters_items): _load_adapter_into_lora_model( lora_model=self.lora_model, adapter_name=str(i), model_id=model_id, torch_device=torch_device, ephemeral_gpu_offload=ephemeral_gpu_offload, autocast_adapter_dtype=autocast_adapter_dtype, subfolder=None, **kwargs, ) self.lora_model.set_adapter(list(peft_config.adapters.keys())) self._maybe_freeze_all_adapters() total_swapped, device = convert_layers_to_xlora( model, self, peft_config, ) n_classes = len(peft_config.adapters) xlora_classifier = XLoraClassifier(model, peft_config, n_classes, total_swapped, device) # Setup the model internal state self.internal_xlora_classifier = xlora_classifier self.internal_xlora_scalings = None # type: ignore # Controlled by enable_adapter_layers or disable_adapter_layers self.disabled = False def _maybe_freeze_all_adapters(self): self.eval() if not self.xlora_config.use_trainable_adapters: for name, param in 
self.named_parameters(): if "lora_" in name: param.requires_grad = False def generate(self, *args, **kwargs): kwargs["use_cache"] = False res = self.lora_model.generate(*args, **kwargs) # type: ignore # This is necessary because we use PeftModel.disable_adapter() which reenables the adapters self._maybe_freeze_all_adapters() return res @contextmanager def _enable_peft_forward_hooks(self, *generate_args, **generate_kwargs): def scalings_injection_hook(target, args, kwargs, scalings): # pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference kwargs["scalings"] = scalings return args, kwargs handles_to_remove = None def pre_forward(module, *args, **kwargs): nonlocal handles_to_remove # =========================== Forward pass with "dummy" scalings ================== args_real = args[0] kwargs_real = args[1] kwargs_real.update(kwargs) dummy_scalings = self.internal_xlora_classifier.make_dummy_scalings(*args_real, **kwargs_real) hook_handles = [] for module in self.modules(): if isinstance(module, LoraLayer): pre_forward = partial(scalings_injection_hook, scalings=dummy_scalings) handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True) hook_handles.append(handle) with torch.no_grad(): self.lora_model.disable_adapter_layers() try: scaling_pass_kwargs = kwargs_real.copy() scaling_pass_kwargs["output_hidden_states"] = True scaling_pass_kwargs["return_dict"] = True try: base_output = self.lora_model.model.forward(*args_real, **scaling_pass_kwargs) finally: # Clean everything up for handle in hook_handles: handle.remove() finally: self.lora_model.enable_adapter_layers() xlora_scalings = self.internal_xlora_classifier(result=base_output, *args_real, **kwargs_real) # =========================== Real forward pass with calculated scalings ================== hook_handles = [] for module in self.modules(): if isinstance(module, LoraLayer): pre_forward = partial(scalings_injection_hook, scalings=xlora_scalings) handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True) hook_handles.append(handle) handles_to_remove = hook_handles if not self.disabled: forward_handle = self.lora_model.model.register_forward_pre_hook(pre_forward, with_kwargs=True) # Run the forward pass: first the scaling pass in the hook, and then with the base model yield if not self.disabled: # TODO(EricLBuehler): If we get a forward exception, we may have multiple forward hooks. for handle in handles_to_remove: handle.remove() forward_handle.remove() def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "lora_model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.lora_model, name) @staticmethod def _prepare_adapter_config(peft_config, _model_config): # Handle X-LoRA case return peft_config """ Does nothing. X-LoRA needs adapters to be frozen. """ def _mark_only_adapters_as_trainable(self) -> None: ... """ This enables the X-LoRA adapter. """ def enable_adapter_layers(self) -> None: self.disabled = False """ This diasables the X-LoRA adapter. 
""" def disable_adapter_layers(self) -> None: self.disabled = True def _create_and_replace( self, lora_config, adapter_name, target, target_name, parent, current_key, ): # Does nothing because XLoraModel has no target modules pass @staticmethod def _check_target_module_exists(lora_config, key): # Does nothing because XLoraModel has no target modules return False def forward(self, *args, **kwargs): return self.lora_model.model(*args, **kwargs) def set_topk_lora(self, value: Optional[int]): """ Sparsely select the specified top_k LoRA experts instead of the default dense method. Set to None to use dense. This is reflected in the config. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore classifier.config.top_k_lora = value def set_global_scaling_weight(self, weight: float): """ Set the global LoRA weight, a scalar to multiply the output of each LoRA adapter by. This is by default 1. This is reflected in the config. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore classifier.config.global_scaling_weight = weight def set_scaling_pass_value(self, value: float | None): """ Set the scaling pass value, the value to set the scalings to during the scaling pass. If the value is None, the scaling pass value will be 1/n where n is the number of adapters. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore classifier._set_override_scaling_pass_value(value) def get_global_scaling_weight(self) -> float: """ Get the global LoRA weight. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore return classifier.config.global_scaling_weight def get_latest_scalings(self) -> Optional[torch.Tensor]: """ Returns the latest scalings prediction, or None if no scalings have been predicted. The tensor is of shape (batch_size, seq_len, n_layers, n_classes). """ return self.internal_xlora_scalings def get_scalings_log(self) -> list[torch.Tensor]: """ Returns a shallow (only copying the list itself not the tensors) copy of the list containing the scalings log. Editing the list does not change the underlying log. The tensors are of shape (batch_size, seq_len, n_layers, n_classes). The seq_len dim may vary with input dimension. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore return classifier.log_scalings.copy() def enable_scalings_logging(self): """ Enable scalings logging. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore classifier.scalings_logging = True def disable_scalings_logging(self): """ Disable scalings logging, without clearing the log. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore classifier.scalings_logging = False def clear_scalings_log(self): """ Clear the scalings log. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore classifier.log_scalings.clear() def get_bucketed_scalings_log(self) -> dict[int, tuple[list[int], list[torch.Tensor]]]: """ Returns bucketed scalings, bucketed by seq_len. Each value consists of the positions (the first) and the associated tensors. The positions are paired with the associated tensors and give the position in the scaling log. """ classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore return classifier._get_bucketed_scalings()
peft/src/peft/tuners/xlora/model.py/0
{ "file_path": "peft/src/peft/tuners/xlora/model.py", "repo_id": "peft", "token_count": 9094 }
219
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Regression testing: check that checkpoints from previous PEFT versions still return the same values. # # For normal regression testing, just run: # # `pytest tests/regression/test_regression.py -s --regression` # # Add `-s` to show potentially useful debugging information. `--regression` is a custom marker that is required for # regression tests not to be skipped. # # To create new regression tests, run: # `HF_TOKEN=<token> REGRESSION_CREATION_MODE=True pytest tests/regression/test_regression.py -s --regression` # # This will *fail* if: # # 1. the git worktree is dirty # 2. the git commit is not tagged # # Note: A Hugging Face Hub token is required to upload the regression artifacts to our # https://huggingface.co/peft-internal-testing repo. This can be done by anyone with write access to the repo but # apparently it is not possible to create a technical token with write access. # # This is important to ensure that the regression artifacts correspond to a specific released version of PEFT. # Therefore, it is recommended to checkout the tag before running the regression tests, e.g. by running: # # `git checkout v0.1.0` # # To override these checks, run: # ``HF_TOKEN=<token> REGRESSION_CREATION_MODE=True REGRESSION_FORCE_MODE=True pytest tests/regression/test_regression.py -s --regression` # # In REGRESSION_CREATION_MODE, one directory will be created in tests/regression/<TEST_NAME>/<PEFT_VERSION>/ for each # test. This will contain the saved adapter, as well as the output of the test of the model for that version. # # In normal testing mode, the saved adapter and output for each version found in the directory # tests/regression/<TEST_NAME>/ will be loaded and compared to the current output. # # When implementing new tests, check the existing ones as well as the description in the docstring of RegressionTester. import os import shutil import subprocess import sys import tempfile import unittest import pytest import torch from huggingface_hub import snapshot_download, upload_folder from torch import nn from transformers import AutoModelForCausalLM, BitsAndBytesConfig from transformers.pytorch_utils import Conv1D import peft from peft import ( AdaLoraConfig, BOFTConfig, IA3Config, LNTuningConfig, LoHaConfig, LoKrConfig, LoraConfig, PeftModel, VBLoRAConfig, VeraConfig, get_peft_model, ) from peft.utils import infer_device from ..testing_utils import require_bitsandbytes, require_deterministic_for_xpu, require_non_cpu PEFT_VERSION = peft.__version__ REGRESSION_DIR = tempfile.mkdtemp(prefix="peft_regression_") HF_TOKEN = os.environ.get("HF_TOKEN") # the repo has to be created manually once, it is not automatically created HF_REPO = "peft-internal-testing/regression-tests" @pytest.fixture(scope="session", autouse=True) def setup_tearndown(): # Use a pytest session-scoped fixture to setup and teardown exactly once per session. 
AFAICT, unittest does not # provide such a feature # download regression artifacts from Hugging Face Hub at the start snapshot_download( repo_id=HF_REPO, local_dir=REGRESSION_DIR, # Don't use symlink, because this prevents us from properly cleaning up the files once finished local_dir_use_symlinks=False, ) yield # delete regression artifacts at the end of the test session; optionally, upload them first if in creation mode creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False")) if creation_mode: # upload the regression directory to Hugging Face Hub, will overwrite by default upload_folder( repo_id=HF_REPO, folder_path=REGRESSION_DIR, token=HF_TOKEN, ) shutil.rmtree(REGRESSION_DIR) def strtobool(val): """Copied from distutils.util""" val = val.lower() if val in ("y", "yes", "t", "true", "on", "1"): return 1 elif val in ("n", "no", "f", "false", "off", "0"): return 0 else: raise ValueError(f"invalid truth value {val!r}") def save_output(output, name, force=False): path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION) filename = os.path.join(path, "output.pt") if os.path.exists(filename) and not force: return if not os.path.exists(path): os.makedirs(path) if os.path.exists(filename) and force: print(f"Overriding existing output in {filename}", file=sys.stderr) torch.save(output, filename) def save_model(model, name, force=False): path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION) filename = os.path.join(path, peft.utils.SAFETENSORS_WEIGHTS_NAME) if os.path.exists(filename) and not force: return if not os.path.exists(path): os.makedirs(path) if os.path.exists(filename) and force: print(f"Overriding existing model in {path}", file=sys.stderr) model.save_pretrained(path) def load_output(name): filename = os.path.join(REGRESSION_DIR, name, "output.pt") return torch.load(filename, map_location=infer_device()) @pytest.mark.regression class RegressionTester(unittest.TestCase): """Base class for regression testing Child classes must call assert_results_equal_or_store and pass the model outtput, as well as a unique name that describes the setting (e.g. "lora_opt-350m_bnb_4bit"). They also need to implement get_output(model) to get the model output, and load_base_model(name) to load the base model. Don't forget to fix the seed in load_base_model. 
""" torch_device = infer_device() def setUp(self): self.tol = 1e-4 self.creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False")) self.force_mode = strtobool(os.environ.get("REGRESSION_FORCE_MODE", "False")) if self.force_mode and not self.creation_mode: raise RuntimeError("REGRESSION_FORCE_MODE can only be used together with REGRESSION_CREATION_MODE") if self.creation_mode: self.check_clean_git_status(self.force_mode) if HF_TOKEN is None: raise RuntimeError("HF_TOKEN environment variable must be set in creation mode") def fix_seed(self): torch.manual_seed(0) def check_clean_git_status(self, force): """Ensure that worktree is not dirty and version tag is checked out""" # check that the worktree is clean try: subprocess.check_output(["git", "diff", "--quiet", "HEAD"]) except subprocess.CalledProcessError as exc: if force: print("Overriding despite dirty git worktree", file=sys.stderr) else: raise RuntimeError("Git worktree is dirty") from exc # check that the commit is tagged try: subprocess.check_output(["git", "describe", "--exact-match", "HEAD"]) except subprocess.CalledProcessError as exc: if force: print("Overriding despite non-tagged commit", file=sys.stderr) else: raise RuntimeError("Git commit is not tagged") from exc @require_deterministic_for_xpu def assert_results_equal_or_store(self, model, name): """Check if the outputs are the same or save the outputs if in creation mode.""" if not self.creation_mode: # normal regression testing mode self._assert_results_equal(name) else: output = self.get_output(model) if not torch.isfinite(output).all(): raise RuntimeError(f"Model output for {name} is not finite") output2 = self.get_output(model) if not torch.allclose(output, output2): raise RuntimeError(f"Model output for {name} is not deterministic") save_output(output, name, force=self.force_mode) save_model(model, name, force=self.force_mode) def _assert_results_equal(self, name): path = os.path.join(REGRESSION_DIR, name) versions = os.listdir(path) for version in versions: # each directory corresponds to a version output_loaded = load_output(os.path.join(name, version)) base_model = self.load_base_model() model = PeftModel.from_pretrained(base_model, os.path.join(path, version)) output = self.get_output(model) assert torch.allclose(output_loaded, output, atol=self.tol, rtol=self.tol) def get_output(self, model): raise NotImplementedError def load_base_model(self): raise NotImplementedError ############## # TEST CASES # ############## class TestMlp(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.lin1 = nn.Linear(20, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.lin1(X) X = self.sm(X) return X self.fix_seed() return MLP().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_mlp") def test_lora_dora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], use_dora=True, ) model = get_peft_model(base_model, config) 
self.assert_results_equal_or_store(model, "lora_dora_mlp") def test_adalora(self): base_model = self.load_base_model() config = AdaLoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], total_step=1, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_mlp") def test_ia3(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["lin0"], feedforward_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_mlp") def test_ia3_no_ff(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["lin0"], feedforward_modules=[], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_no_ff_mlp") def test_loha(self): # TODO self.skipTest("Skipping LoHa for now because init is not seedable") base_model = self.load_base_model() config = LoHaConfig( r=8, init_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "loha_mlp") def test_lokr(self): # TODO self.skipTest("Skipping LoKr for now because init is not seedable") base_model = self.load_base_model() config = LoKrConfig( r=8, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lokr_mlp") def test_lora_modules_to_save(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], modules_to_save=["lin1"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_mlp_modules_to_save") def test_boft(self): base_model = self.load_base_model() config = BOFTConfig( boft_block_size=2, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "boft_mlp") def test_ln_tuning(self): base_model = self.load_base_model() config = LNTuningConfig(target_modules=["lin0"]) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ln_tuning_mlp") def test_vera_tuning(self): base_model = self.load_base_model() config = VeraConfig(target_modules=["lin0"]) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "vera_tuning_mlp") def test_vblora_tuning(self): base_model = self.load_base_model() config = VBLoRAConfig( vector_length=1, num_vectors=2, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "vblora_tuning_mlp") class TestLoraEmbConv1D(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class ModelEmbConv1D(nn.Module): def __init__(self): super().__init__() self.emb = nn.Embedding(100, 5) self.conv1d = Conv1D(1, 5) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = self.emb(X) X = self.conv1d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X self.fix_seed() return ModelEmbConv1D().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["emb", "conv1d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_emb_conv1d") class TestLoraConv2D(RegressionTester): def 
get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class ModelConv2D(nn.Module): def __init__(self): super().__init__() self.conv2d = nn.Conv2d(5, 10, 3) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float().reshape(2, 5, 3, 3) X = self.conv2d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X self.fix_seed() return ModelConv2D().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_conv2d") def test_ia3(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["conv2d"], feedforward_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_conv2d") def test_loha(self): # TODO self.skipTest("Skipping LoHa for now because init is not seedable") base_model = self.load_base_model() config = LoHaConfig( r=8, init_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "loha_conv2d") def test_lokr(self): # TODO self.skipTest("Skipping LoKr for now because init is not seedable") base_model = self.load_base_model() config = LoKrConfig( r=8, init_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lokr_conv2d") def test_boft(self): base_model = self.load_base_model() config = BOFTConfig( boft_block_size=3, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "boft_conv2d") class TestOpt(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() return AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m") def test_adalora(self): base_model = self.load_base_model() config = AdaLoraConfig( r=8, init_lora_weights=False, total_step=1, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m") def test_ia3(self): base_model = self.load_base_model() config = IA3Config(init_ia3_weights=False) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_opt-350m") @require_non_cpu @require_bitsandbytes class TestOpt8bitBnb(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", quantization_config=BitsAndBytesConfig(load_in_8bit=True), ) return model def test_lora_8bit(self): # Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used # in the CI, the test can fail without any code change. 
In that case, delete the regression artifact and create # a new one using the new GPU. base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_8bit") def test_adalora(self): # TODO self.skipTest( "Skipping AdaLora for now, getting TypeError: unsupported operand type(s) for +=: 'dict' and 'Tensor'" ) # Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used # in the CI, the test can fail without any code change. In that case, delete the regression artifact and create # a new one using the new GPU. base_model = self.load_base_model() config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, total_step=200, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m_8bit") @require_non_cpu @require_bitsandbytes class TestOpt4bitBnb(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, bnb_4bit_compute_dtype=torch.float32, ) model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", quantization_config=bnb_config, torch_dtype=torch.float32, ) return model def test_lora_4bit(self): # Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used # in the CI, the test can fail without any code change. In that case, delete the regression artifact and create # a new one using the new GPU. base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_4bit") def test_adalora(self): # TODO self.skipTest("Skipping AdaLora for now because of a bug, see #1113") # Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used # in the CI, the test can fail without any code change. In that case, delete the regression artifact and create # a new one using the new GPU. base_model = self.load_base_model() config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, total_step=200, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m_4bit")
peft/tests/regression/test_regression.py/0
{ "file_path": "peft/tests/regression/test_regression.py", "repo_id": "peft", "token_count": 10766 }
220
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Adapted from https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/decomposition/tests/test_incremental_pca.py import pytest import torch from datasets import load_dataset from torch.testing import assert_close from peft.utils.incremental_pca import IncrementalPCA torch.manual_seed(1999) @pytest.fixture(scope="module") def iris(): return load_dataset("scikit-learn/iris", split="train") def test_incremental_pca(iris): # Incremental PCA on dense arrays. n_components = 2 X = torch.tensor([iris["SepalLengthCm"], iris["SepalWidthCm"], iris["PetalLengthCm"], iris["PetalWidthCm"]]).T batch_size = X.shape[0] // 3 ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size) ipca.fit(X) X_transformed = ipca.transform(X) # PCA U, S, Vh = torch.linalg.svd(X - torch.mean(X, dim=0)) max_abs_rows = torch.argmax(torch.abs(Vh), dim=1) signs = torch.sign(Vh[range(Vh.shape[0]), max_abs_rows]) Vh *= signs.view(-1, 1) explained_variance = S**2 / (X.size(0) - 1) explained_variance_ratio = explained_variance / explained_variance.sum() assert X_transformed.shape == (X.shape[0], 2) assert_close( ipca.explained_variance_ratio_.sum().item(), explained_variance_ratio[:n_components].sum().item(), rtol=1e-3, atol=1e-3, ) def test_incremental_pca_check_projection(): # Test that the projection of data is correct. n, p = 100, 3 X = torch.randn(n, p, dtype=torch.float64) * 0.1 X[:10] += torch.tensor([3, 4, 5]) Xt = 0.1 * torch.randn(1, p, dtype=torch.float64) + torch.tensor([3, 4, 5]) # Get the reconstruction of the generated data X # Note that Xt has the same "components" as X, just separated # This is what we want to ensure is recreated correctly Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt) # Normalize Yt /= torch.sqrt((Yt**2).sum()) # Make sure that the first element of Yt is ~1, this means # the reconstruction worked as expected assert_close(torch.abs(Yt[0][0]).item(), 1.0, atol=1e-1, rtol=1e-1) def test_incremental_pca_validation(): # Test that n_components is <= n_features. X = torch.tensor([[0, 1, 0], [1, 0, 0]]) n_samples, n_features = X.shape n_components = 4 with pytest.raises( ValueError, match=( f"n_components={n_components} invalid" f" for n_features={n_features}, need more rows than" " columns for IncrementalPCA" " processing" ), ): IncrementalPCA(n_components, batch_size=10).fit(X) # Tests that n_components is also <= n_samples. 
n_components = 3 with pytest.raises( ValueError, match=(f"n_components={n_components} must be less or equal to the batch number of samples {n_samples}"), ): IncrementalPCA(n_components=n_components).partial_fit(X) def test_n_components_none(): # Ensures that n_components == None is handled correctly for n_samples, n_features in [(50, 10), (10, 50)]: X = torch.rand(n_samples, n_features) ipca = IncrementalPCA(n_components=None) # First partial_fit call, ipca.n_components_ is inferred from # min(X.shape) ipca.partial_fit(X) assert ipca.n_components == min(X.shape) def test_incremental_pca_num_features_change(): # Test that changing n_components will raise an error. n_samples = 100 X = torch.randn(n_samples, 20) X2 = torch.randn(n_samples, 50) ipca = IncrementalPCA(n_components=None) ipca.fit(X) with pytest.raises(ValueError): ipca.partial_fit(X2) def test_incremental_pca_batch_signs(): # Test that components_ sign is stable over batch sizes. n_samples = 100 n_features = 3 X = torch.randn(n_samples, n_features) all_components = [] batch_sizes = torch.arange(10, 20) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for i, j in zip(all_components[:-1], all_components[1:]): assert_close(torch.sign(i), torch.sign(j), rtol=1e-6, atol=1e-6) def test_incremental_pca_batch_values(): # Test that components_ values are stable over batch sizes. n_samples = 100 n_features = 3 X = torch.randn(n_samples, n_features) all_components = [] batch_sizes = torch.arange(20, 40, 3) for batch_size in batch_sizes: ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) all_components.append(ipca.components_) for i, j in zip(all_components[:-1], all_components[1:]): assert_close(i, j, rtol=1e-1, atol=1e-1) def test_incremental_pca_partial_fit(): # Test that fit and partial_fit get equivalent results. n, p = 50, 3 X = torch.randn(n, p) # spherical data X[:, 1] *= 0.00001 # make middle component relatively small X += torch.tensor([5, 4, 3]) # make a large mean # same check that we can find the original data from the transformed # signal (since the data is almost of rank n_components) batch_size = 10 ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X) pipca = IncrementalPCA(n_components=2, batch_size=batch_size) # Add one to make sure endpoint is included batch_itr = torch.arange(0, n + 1, batch_size) for i, j in zip(batch_itr[:-1], batch_itr[1:]): pipca.partial_fit(X[i:j, :]) assert_close(ipca.components_, pipca.components_, rtol=1e-3, atol=1e-3) def test_incremental_pca_lowrank(iris): # Test that lowrank mode is equivalent to non-lowrank mode. n_components = 2 X = torch.tensor([iris["SepalLengthCm"], iris["SepalWidthCm"], iris["PetalLengthCm"], iris["PetalWidthCm"]]).T batch_size = X.shape[0] // 3 ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size) ipca.fit(X) ipcalr = IncrementalPCA(n_components=n_components, batch_size=batch_size, lowrank=True) ipcalr.fit(X) assert_close(ipca.components_, ipcalr.components_, rtol=1e-7, atol=1e-7)
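A standalone sketch of the incremental-fitting pattern these tests exercise: a streaming `partial_fit` over batches should match a single `fit` over the full data, as asserted in `test_incremental_pca_partial_fit` above. The data here is random and purely illustrative.

# Minimal IncrementalPCA usage sketch (batch size and shapes are illustrative).
import torch
from peft.utils.incremental_pca import IncrementalPCA

X = torch.randn(200, 16)

# Single-shot fit (internally iterates over batches of `batch_size`).
ipca = IncrementalPCA(n_components=4, batch_size=50).fit(X)

# Equivalent streaming fit, useful when the data cannot be held in memory at once.
pipca = IncrementalPCA(n_components=4, batch_size=50)
for start in range(0, X.shape[0], 50):
    pipca.partial_fit(X[start : start + 50])

print(ipca.explained_variance_ratio_)
print(torch.allclose(ipca.components_, pipca.components_, atol=1e-3))

X_reduced = ipca.transform(X)  # shape: (200, 4)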
peft/tests/test_incremental_pca.py/0
{ "file_path": "peft/tests/test_incremental_pca.py", "repo_id": "peft", "token_count": 2775 }
221
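The incremental PCA tests above only exercise a handful of entry points (`fit`, `partial_fit`, `transform`, `components_`, `explained_variance_ratio_`). As a quick reference, here is a minimal usage sketch built purely from those calls; the array shapes and batch sizes are arbitrary choices for illustration, not anything the tests require.

```py
# Minimal sketch of the IncrementalPCA API exercised by the tests above.
import torch
from peft.utils.incremental_pca import IncrementalPCA

X = torch.randn(1000, 64)

# One-shot fit with internal batching.
ipca = IncrementalPCA(n_components=8, batch_size=250)
ipca.fit(X)
X_reduced = ipca.transform(X)            # shape: (1000, 8)
print(ipca.explained_variance_ratio_.sum())

# Equivalent streaming usage for data that does not fit in memory;
# each chunk must contain at least n_components samples.
ipca_stream = IncrementalPCA(n_components=8)
for chunk in X.split(250):
    ipca_stream.partial_fit(chunk)
```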
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from dataclasses import asdict, replace import numpy as np import pytest from diffusers import StableDiffusionPipeline from peft import ( BOFTConfig, HRAConfig, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, get_peft_model, get_peft_model_state_dict, inject_adapter_in_model, set_peft_model_state_dict, ) from peft.tuners.tuners_utils import BaseTunerLayer from .testing_common import PeftCommonTester from .testing_utils import set_init_weights_false, temp_seed PEFT_DIFFUSERS_SD_MODELS_TO_TEST = ["hf-internal-testing/tiny-sd-pipe"] DIFFUSERS_CONFIGS = [ ( LoraConfig, { "text_encoder": { "r": 8, "lora_alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "lora_dropout": 0.0, "bias": "none", "init_lora_weights": False, }, "unet": { "r": 8, "lora_alpha": 32, "target_modules": [ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2", ], "lora_dropout": 0.0, "bias": "none", "init_lora_weights": False, }, }, ), ( LoHaConfig, { "text_encoder": { "r": 8, "alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "rank_dropout": 0.0, "module_dropout": 0.0, "init_weights": False, }, "unet": { "r": 8, "alpha": 32, "target_modules": [ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2", ], "rank_dropout": 0.0, "module_dropout": 0.0, "init_weights": False, }, }, ), ( LoKrConfig, { "text_encoder": { "r": 8, "alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "rank_dropout": 0.0, "module_dropout": 0.0, "init_weights": False, }, "unet": { "r": 8, "alpha": 32, "target_modules": [ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2", ], "rank_dropout": 0.0, "module_dropout": 0.0, "init_weights": False, }, }, ), ( OFTConfig, { "text_encoder": { "r": 1, "oft_block_size": 0, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "module_dropout": 0.0, "init_weights": False, "use_cayley_neumann": False, }, "unet": { "r": 1, "oft_block_size": 0, "target_modules": [ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2", ], "module_dropout": 0.0, "init_weights": False, "use_cayley_neumann": False, }, }, ), ( BOFTConfig, { "text_encoder": { "boft_block_num": 1, "boft_block_size": 0, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "boft_dropout": 0.0, "init_weights": False, }, "unet": { "boft_block_num": 1, "boft_block_size": 0, "target_modules": [ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2", ], "boft_dropout": 0.0, "init_weights": False, }, }, ), ( HRAConfig, { "text_encoder": { "r": 8, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "init_weights": False, }, "unet": { "r": 8, "target_modules": [ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", 
"ff.net.2", ], "init_weights": False, }, }, ), ] def skip_if_not_lora(config_cls): if config_cls != LoraConfig: pytest.skip("Skipping test because it is only applicable to LoraConfig") class TestStableDiffusionModel(PeftCommonTester): r""" Tests that diffusers StableDiffusion model works with PEFT as expected. """ transformers_class = StableDiffusionPipeline sd_model = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe") def instantiate_sd_peft(self, model_id, config_cls, config_kwargs): # Instantiate StableDiffusionPipeline if model_id == "hf-internal-testing/tiny-sd-pipe": # in CI, this model often times out on the hub, let's cache it model = copy.deepcopy(self.sd_model) else: model = self.transformers_class.from_pretrained(model_id) config_kwargs = config_kwargs.copy() text_encoder_kwargs = config_kwargs.pop("text_encoder") unet_kwargs = config_kwargs.pop("unet") # the remaining config kwargs should be applied to both configs for key, val in config_kwargs.items(): text_encoder_kwargs[key] = val unet_kwargs[key] = val # Instantiate text_encoder adapter config_text_encoder = config_cls(**text_encoder_kwargs) model.text_encoder = get_peft_model(model.text_encoder, config_text_encoder) # Instantiate unet adapter config_unet = config_cls(**unet_kwargs) model.unet = get_peft_model(model.unet, config_unet) # Move model to device model = model.to(self.torch_device) return model def prepare_inputs_for_testing(self): return { "prompt": "a high quality digital photo of a cute corgi", "num_inference_steps": 3, } @pytest.mark.parametrize("model_id", PEFT_DIFFUSERS_SD_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", DIFFUSERS_CONFIGS) def test_merge_layers(self, model_id, config_cls, config_kwargs): if (config_cls == LoKrConfig) and (self.torch_device not in ["cuda", "xpu"]): pytest.skip("Merging test with LoKr fails without GPU") # Instantiate model & adapters config_kwargs = set_init_weights_false(config_cls, config_kwargs) model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Generate output for peft modified StableDiffusion dummy_input = self.prepare_inputs_for_testing() with temp_seed(seed=42): peft_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Merge adapter and model if config_cls not in [LoHaConfig, OFTConfig, HRAConfig]: # TODO: Merging the text_encoder is leading to issues on CPU with PyTorch 2.1 model.text_encoder = model.text_encoder.merge_and_unload() model.unet = model.unet.merge_and_unload() # Generate output for peft merged StableDiffusion with temp_seed(seed=42): merged_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Images are in uint8 drange, so use large atol assert np.allclose(peft_output, merged_output, atol=1.0) @pytest.mark.parametrize("model_id", PEFT_DIFFUSERS_SD_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", DIFFUSERS_CONFIGS) def test_merge_layers_safe_merge(self, model_id, config_cls, config_kwargs): if (config_cls == LoKrConfig) and (self.torch_device not in ["cuda", "xpu"]): pytest.skip("Merging test with LoKr fails without GPU") # Instantiate model & adapters model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Generate output for peft modified StableDiffusion dummy_input = self.prepare_inputs_for_testing() with temp_seed(seed=42): peft_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Merge adapter and model if config_cls not in [LoHaConfig, OFTConfig, HRAConfig]: # TODO: Merging the text_encoder is 
leading to issues on CPU with PyTorch 2.1 model.text_encoder = model.text_encoder.merge_and_unload(safe_merge=True) model.unet = model.unet.merge_and_unload(safe_merge=True) # Generate output for peft merged StableDiffusion with temp_seed(seed=42): merged_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Images are in uint8 drange, so use large atol assert np.allclose(peft_output, merged_output, atol=1.0) @pytest.mark.parametrize("model_id", PEFT_DIFFUSERS_SD_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", DIFFUSERS_CONFIGS) def test_add_weighted_adapter_base_unchanged(self, model_id, config_cls, config_kwargs): skip_if_not_lora(config_cls) # Instantiate model & adapters config_kwargs = set_init_weights_false(config_cls, config_kwargs) model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Get current available adapter config text_encoder_adapter_name = next(iter(model.text_encoder.peft_config.keys())) unet_adapter_name = next(iter(model.unet.peft_config.keys())) text_encoder_adapter_config = replace(model.text_encoder.peft_config[text_encoder_adapter_name]) unet_adapter_config = replace(model.unet.peft_config[unet_adapter_name]) # Create weighted adapters model.text_encoder.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test") model.unet.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test") # Assert that base adapters config did not change assert asdict(text_encoder_adapter_config) == asdict(model.text_encoder.peft_config[text_encoder_adapter_name]) assert asdict(unet_adapter_config) == asdict(model.unet.peft_config[unet_adapter_name]) @pytest.mark.parametrize("model_id", PEFT_DIFFUSERS_SD_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", DIFFUSERS_CONFIGS) def test_disable_adapter(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_disable_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_DIFFUSERS_SD_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", DIFFUSERS_CONFIGS) def test_load_model_low_cpu_mem_usage(self, model_id, config_cls, config_kwargs): # Instantiate model & adapters pipe = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) te_state_dict = get_peft_model_state_dict(pipe.text_encoder) unet_state_dict = get_peft_model_state_dict(pipe.unet) del pipe pipe = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) config_kwargs = config_kwargs.copy() text_encoder_kwargs = config_kwargs.pop("text_encoder") unet_kwargs = config_kwargs.pop("unet") # the remaining config kwargs should be applied to both configs for key, val in config_kwargs.items(): text_encoder_kwargs[key] = val unet_kwargs[key] = val config_text_encoder = config_cls(**text_encoder_kwargs) config_unet = config_cls(**unet_kwargs) # check text encoder inject_adapter_in_model(config_text_encoder, pipe.text_encoder, low_cpu_mem_usage=True) # sanity check that the adapter was applied: assert any(isinstance(module, BaseTunerLayer) for module in pipe.text_encoder.modules()) assert "meta" in {p.device.type for p in pipe.text_encoder.parameters()} set_peft_model_state_dict(pipe.text_encoder, te_state_dict, low_cpu_mem_usage=True) assert "meta" not in {p.device.type for p in pipe.text_encoder.parameters()} # check unet inject_adapter_in_model(config_unet, pipe.unet, low_cpu_mem_usage=True) # sanity check that the adapter was applied: assert any(isinstance(module, 
BaseTunerLayer) for module in pipe.unet.modules()) assert "meta" in {p.device.type for p in pipe.unet.parameters()} set_peft_model_state_dict(pipe.unet, unet_state_dict, low_cpu_mem_usage=True) assert "meta" not in {p.device.type for p in pipe.unet.parameters()}
peft/tests/test_stablediffusion.py/0
{ "file_path": "peft/tests/test_stablediffusion.py", "repo_id": "peft", "token_count": 7695 }
222
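The Stable Diffusion tests above all funnel through `instantiate_sd_peft`, which attaches one adapter to the text encoder and one to the UNet. The sketch below shows that wiring for the LoRA case, using the hyperparameters from `DIFFUSERS_CONFIGS` and the same tiny test pipeline; the final merge step is optional and, as the tests note, is skipped for some adapter types.

```py
# Sketch of the adapter wiring performed by instantiate_sd_peft above (LoRA case).
from diffusers import StableDiffusionPipeline
from peft import LoraConfig, get_peft_model

pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe")

text_encoder_config = LoraConfig(
    r=8, lora_alpha=32, lora_dropout=0.0, bias="none", init_lora_weights=False,
    target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
)
unet_config = LoraConfig(
    r=8, lora_alpha=32, lora_dropout=0.0, bias="none", init_lora_weights=False,
    target_modules=["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0",
                    "ff.net.0.proj", "ff.net.2"],
)

# Wrap both sub-models with PEFT adapters, as the test helper does.
pipe.text_encoder = get_peft_model(pipe.text_encoder, text_encoder_config)
pipe.unet = get_peft_model(pipe.unet, unet_config)

# Optionally fold the adapters back into the base weights.
pipe.text_encoder = pipe.text_encoder.merge_and_unload()
pipe.unet = pipe.unet.merge_and_unload()
```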
# Hugging Face Timm Docs

## Getting Started

```
pip install git+https://github.com/huggingface/doc-builder.git@main#egg=hf-doc-builder
pip install watchdog black
```

## Preview the Docs Locally

```
doc-builder preview timm hfdocs/source
```
pytorch-image-models/hfdocs/README.md/0
{ "file_path": "pytorch-image-models/hfdocs/README.md", "repo_id": "pytorch-image-models", "token_count": 88 }
223
# ResNeSt

A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: \\( V = \text{Concat} \{ V^{1},V^{2},\cdots,{V}^{K} \} \\). As in standard residual blocks, the final output \\( Y \\) of our Split-Attention block is produced using a shortcut connection: \\( Y=V+X \\), if the input and output feature-map share the same shape. For blocks with a stride, an appropriate transformation \\( \mathcal{T} \\) is applied to the shortcut connection to align the output shapes: \\( Y=V+\mathcal{T}(X) \\). For example, \\( \mathcal{T} \\) can be a strided convolution or combined convolution-with-pooling.

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('resnest101e', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.inference_mode():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)

>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
...     print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `resnest101e`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```py
>>> model = timm.create_model('resnest101e', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation ```BibTeX @misc{zhang2020resnest, title={ResNeSt: Split-Attention Networks}, author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola}, year={2020}, eprint={2004.08955}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: ResNeSt Paper: Title: 'ResNeSt: Split-Attention Networks' URL: https://paperswithcode.com/paper/resnest-split-attention-networks Models: - Name: resnest101e In Collection: ResNeSt Metadata: FLOPs: 17423183648 Parameters: 48280000 File Size: 193782911 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest101e LR: 0.1 Epochs: 270 Layers: 101 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '256' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L182 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.88% Top 5 Accuracy: 96.31% - Name: resnest14d In Collection: ResNeSt Metadata: FLOPs: 3548594464 Parameters: 10610000 File Size: 42562639 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest14d LR: 0.1 Epochs: 270 Layers: 14 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8192 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L148 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.51% Top 5 Accuracy: 92.52% - Name: resnest200e In Collection: ResNeSt Metadata: FLOPs: 45954387872 Parameters: 70200000 File Size: 193782911 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest200e LR: 0.1 Epochs: 270 Layers: 200 Dropout: 0.2 Crop Pct: '0.909' Momentum: 0.9 Batch Size: 2048 Image Size: '320' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L194 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.85% Top 5 
Accuracy: 96.89% - Name: resnest269e In Collection: ResNeSt Metadata: FLOPs: 100830307104 Parameters: 110930000 File Size: 445402691 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest269e LR: 0.1 Epochs: 270 Layers: 269 Dropout: 0.2 Crop Pct: '0.928' Momentum: 0.9 Batch Size: 2048 Image Size: '416' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L206 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.53% Top 5 Accuracy: 96.99% - Name: resnest26d In Collection: ResNeSt Metadata: FLOPs: 4678918720 Parameters: 17070000 File Size: 68470242 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest26d LR: 0.1 Epochs: 270 Layers: 26 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8192 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L159 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.48% Top 5 Accuracy: 94.3% - Name: resnest50d In Collection: ResNeSt Metadata: FLOPs: 6937106336 Parameters: 27480000 File Size: 110273258 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest50d LR: 0.1 Epochs: 270 Layers: 50 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8192 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L170 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.96% Top 5 Accuracy: 95.38% - Name: resnest50d_1s4x24d In Collection: ResNeSt Metadata: FLOPs: 5686764544 Parameters: 25680000 File Size: 103045531 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest50d_1s4x24d LR: 
0.1 Epochs: 270 Layers: 50 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8192 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L229 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.0% Top 5 Accuracy: 95.33% - Name: resnest50d_4s2x40d In Collection: ResNeSt Metadata: FLOPs: 5657064720 Parameters: 30420000 File Size: 122133282 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Global Average Pooling - Max Pooling - ReLU - Residual Connection - Softmax - Split Attention Tasks: - Image Classification Training Techniques: - AutoAugment - DropBlock - Label Smoothing - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 64x NVIDIA V100 GPUs ID: resnest50d_4s2x40d LR: 0.1 Epochs: 270 Layers: 50 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8192 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnest.py#L218 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.11% Top 5 Accuracy: 95.55% -->
pytorch-image-models/hfdocs/source/models/resnest.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/resnest.mdx", "repo_id": "pytorch-image-models", "token_count": 5469 }
224
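The ResNeSt page above defers to timm's feature-extraction docs for feature-map usage. As a short sketch of what that looks like, assuming timm's standard `features_only=True` flag (not shown on the page itself):

```py
# Extract intermediate feature maps from a ResNeSt backbone.
import timm
import torch

model = timm.create_model('resnest101e', pretrained=True, features_only=True)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.inference_mode():
    feature_maps = model(x)          # list of tensors, one per feature stage

for fm in feature_maps:
    print(fm.shape)
```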
# (Tensorflow) EfficientNet

**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.

The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.

The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).

The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('tf_efficientnet_b0', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.inference_mode():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)

>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
...     print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
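To make the compound-scaling rule described at the top of this page concrete, here is a small, purely illustrative calculation. The coefficient values \\( \alpha=1.2, \beta=1.1, \gamma=1.15 \\) are the ones commonly quoted for EfficientNet and are an assumption of this sketch, not something defined on this page.

```py
# Illustrative arithmetic for compound scaling: depth, width and resolution
# are each scaled by a fixed coefficient raised to the compound exponent phi.
alpha, beta, gamma = 1.2, 1.1, 1.15  # assumed values, per the EfficientNet paper

def compound_scale(phi: int):
    depth_mult = alpha ** phi        # more layers
    width_mult = beta ** phi         # more channels
    res_mult = gamma ** phi          # larger input images
    return depth_mult, width_mult, res_mult

for phi in range(4):
    d, w, r = compound_scale(phi)
    # FLOPs grow roughly by (alpha * beta**2 * gamma**2) ** phi, i.e. about 2 ** phi
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}")
```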
## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('tf_efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: TF EfficientNet Paper: Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for Models: - Name: tf_efficientnet_b0 In Collection: TF EfficientNet Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21383997 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet Training Resources: TPUv3 Cloud TPU ID: tf_efficientnet_b0 LR: 0.256 Epochs: 350 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1241 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.85% Top 5 Accuracy: 93.23% - Name: tf_efficientnet_b1 In Collection: TF EfficientNet Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31512534 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b1 LR: 0.256 Epochs: 350 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1251 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.84% Top 5 Accuracy: 94.2% - Name: tf_efficientnet_b2 In Collection: TF EfficientNet Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36797929 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - 
AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b2 LR: 0.256 Epochs: 350 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1261 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.07% Top 5 Accuracy: 94.9% - Name: tf_efficientnet_b3 In Collection: TF EfficientNet Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49381362 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b3 LR: 0.256 Epochs: 350 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1271 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.65% Top 5 Accuracy: 95.72% - Name: tf_efficientnet_b4 In Collection: TF EfficientNet Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77989689 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet Training Resources: TPUv3 Cloud TPU ID: tf_efficientnet_b4 LR: 0.256 Epochs: 350 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1281 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.03% Top 5 Accuracy: 96.3% - Name: tf_efficientnet_b5 In Collection: TF EfficientNet Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122403150 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b5 LR: 0.256 Epochs: 350 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: 
https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1291 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.81% Top 5 Accuracy: 96.75% - Name: tf_efficientnet_b6 In Collection: TF EfficientNet Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173232007 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b6 LR: 0.256 Epochs: 350 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1301 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.11% Top 5 Accuracy: 96.89% - Name: tf_efficientnet_b7 In Collection: TF EfficientNet Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266850607 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b7 LR: 0.256 Epochs: 350 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1312 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.93% Top 5 Accuracy: 97.2% - Name: tf_efficientnet_b8 In Collection: TF EfficientNet Metadata: FLOPs: 80962956270 Parameters: 87410000 File Size: 351379853 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b8 LR: 0.256 Epochs: 350 Crop Pct: '0.954' Momentum: 0.9 Batch Size: 2048 Image Size: '672' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1323 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.35% Top 5 Accuracy: 97.39% - Name: 
tf_efficientnet_el In Collection: TF EfficientNet Metadata: FLOPs: 9356616096 Parameters: 10590000 File Size: 42800271 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_el Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1551 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.45% Top 5 Accuracy: 95.17% - Name: tf_efficientnet_em In Collection: TF EfficientNet Metadata: FLOPs: 3636607040 Parameters: 6900000 File Size: 27933644 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_em Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1541 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.71% Top 5 Accuracy: 94.33% - Name: tf_efficientnet_es In Collection: TF EfficientNet Metadata: FLOPs: 2057577472 Parameters: 5440000 File Size: 22008479 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: tf_efficientnet_es Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1531 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.28% Top 5 Accuracy: 93.6% - Name: tf_efficientnet_l2_ns_475 In Collection: TF EfficientNet Metadata: FLOPs: 217795669644 Parameters: 480310000 File Size: 1925950424 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: TPUv3 Cloud TPU ID: tf_efficientnet_l2_ns_475 LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.936' Momentum: 0.9 Batch Size: 2048 Image Size: '475' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1509 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth Results: - Task: Image Classification 
Dataset: ImageNet Metrics: Top 1 Accuracy: 88.24% Top 5 Accuracy: 98.55% -->
pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/tf-efficientnet.mdx", "repo_id": "pytorch-image-models", "token_count": 8002 }
225
""" ONNX export script Export PyTorch models as ONNX graphs. This export script originally started as an adaptation of code snippets found at https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html The default parameters work with PyTorch 1.6 and ONNX 1.7 and produce an optimal ONNX graph for hosting in the ONNX runtime (see onnx_validate.py). To export an ONNX model compatible with caffe2 (see caffe2_benchmark.py and caffe2_validate.py), the --keep-init and --aten-fallback flags are currently required. Older versions of PyTorch/ONNX (tested PyTorch 1.4, ONNX 1.5) do not need extra flags for caffe2 compatibility, but they produce a model that isn't as fast running on ONNX runtime. Most new release of PyTorch and ONNX cause some sort of breakage in the export / usage of ONNX models. Please do your research and search ONNX and PyTorch issue tracker before asking me. Thanks. Copyright 2020 Ross Wightman """ import argparse import timm from timm.utils.model import reparameterize_model from timm.utils.onnx import onnx_export parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('output', metavar='ONNX_FILE', help='output model filename') parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100', help='model architecture (default: mobilenetv3_large_100)') parser.add_argument('--opset', type=int, default=None, help='ONNX opset to use (default: 10)') parser.add_argument('--keep-init', action='store_true', default=False, help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.') parser.add_argument('--aten-fallback', action='store_true', default=False, help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.') parser.add_argument('--dynamic-size', action='store_true', default=False, help='Export model width dynamic width/height. Not recommended for "tf" models with SAME padding.') parser.add_argument('--check-forward', action='store_true', default=False, help='Do a full check of torch vs onnx forward after export.') parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size (default: 1)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--num-classes', type=int, default=None, help='Number classes in dataset') parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to checkpoint (default: none)') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--training', default=False, action='store_true', help='Export in training mode (default is eval)') parser.add_argument('--verbose', default=False, action='store_true', help='Extra stdout output') parser.add_argument('--dynamo', default=False, action='store_true', help='Use torch dynamo export.') def main(): args = parser.parse_args() args.pretrained = True if args.checkpoint: args.pretrained = False print("==> Creating PyTorch {} model".format(args.model)) # NOTE exportable=True flag disables autofn/jit scripted activations and uses Conv2dSameExport layers # for models using SAME padding model = timm.create_model( args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, exportable=True, ) if args.reparam: model = reparameterize_model(model) if args.input_size is not None: assert len(args.input_size) == 3, 'input-size should be N H W (channels, height, width)' input_size = args.input_size elif args.img_size is not None: input_size = (3, args.img_size, args.img_size) else: input_size = None onnx_export( model, args.output, opset=args.opset, dynamic_size=args.dynamic_size, aten_fallback=args.aten_fallback, keep_initializers=args.keep_init, check_forward=args.check_forward, training=args.training, verbose=args.verbose, use_dynamo=args.dynamo, input_size=input_size, batch_size=args.batch_size, ) if __name__ == '__main__': main()
pytorch-image-models/onnx_export.py/0
{ "file_path": "pytorch-image-models/onnx_export.py", "repo_id": "pytorch-image-models", "token_count": 2010 }
226
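For programmatic use, the same helper that onnx_export.py wraps can be called directly. This is a sketch using only arguments that appear in the script above; the model name, output file name, and opset choice are illustrative assumptions.

```py
# Sketch of calling timm's ONNX export helper directly, mirroring main() above.
import timm
from timm.utils.onnx import onnx_export

# exportable=True mirrors the script: it selects export-friendly layers for
# models that use SAME padding.
model = timm.create_model('mobilenetv3_large_100', pretrained=True, exportable=True)

onnx_export(
    model,
    'mobilenetv3_large_100.onnx',
    opset=13,                     # assumed opset; the script default is None
    input_size=(3, 224, 224),
    batch_size=1,
    check_forward=True,           # compare torch vs onnx outputs after export
)
```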
import pytest import torch import torch.nn as nn from timm.layers import create_act_layer, set_layer_config, get_act_layer, get_act_fn, Attention2d, MultiQueryAttentionV2 import importlib import os torch_backend = os.environ.get('TORCH_BACKEND') if torch_backend is not None: importlib.import_module(torch_backend) torch_device = os.environ.get('TORCH_DEVICE', 'cpu') class MLP(nn.Module): def __init__(self, act_layer="relu", inplace=True): super(MLP, self).__init__() self.fc1 = nn.Linear(1000, 100) self.act = create_act_layer(act_layer, inplace=inplace) self.fc2 = nn.Linear(100, 10) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.fc2(x) return x def _run_act_layer_grad(act_type, inplace=True): x = torch.rand(10, 1000) * 10 m = MLP(act_layer=act_type, inplace=inplace) def _run(x, act_layer=''): if act_layer: # replace act layer if set m.act = create_act_layer(act_layer, inplace=inplace) out = m(x) l = (out - 0).pow(2).sum() return l x = x.to(device=torch_device) m.to(device=torch_device) out_me = _run(x) with set_layer_config(scriptable=True): out_jit = _run(x, act_type) assert torch.isclose(out_jit, out_me) with set_layer_config(no_jit=True): out_basic = _run(x, act_type) assert torch.isclose(out_basic, out_jit) def test_swish_grad(): for _ in range(100): _run_act_layer_grad('swish') def test_mish_grad(): for _ in range(100): _run_act_layer_grad('mish') def test_hard_sigmoid_grad(): for _ in range(100): _run_act_layer_grad('hard_sigmoid', inplace=None) def test_hard_swish_grad(): for _ in range(100): _run_act_layer_grad('hard_swish') def test_hard_mish_grad(): for _ in range(100): _run_act_layer_grad('hard_mish') def test_get_act_layer_empty_string(): # Empty string should return None assert get_act_layer('') is None def test_create_act_layer_inplace_error(): class NoInplaceAct(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x # Should recover when inplace arg causes TypeError layer = create_act_layer(NoInplaceAct, inplace=True) assert isinstance(layer, NoInplaceAct) def test_create_act_layer_edge_cases(): # Test None input assert create_act_layer(None) is None # Test TypeError handling for inplace class CustomAct(nn.Module): def __init__(self, **kwargs): super().__init__() def forward(self, x): return x result = create_act_layer(CustomAct, inplace=True) assert isinstance(result, CustomAct) def test_get_act_fn_callable(): def custom_act(x): return x assert get_act_fn(custom_act) is custom_act def test_get_act_fn_none(): assert get_act_fn(None) is None assert get_act_fn('') is None @pytest.mark.parametrize("dim", [128]) @pytest.mark.parametrize("dim_out", [128, 256]) @pytest.mark.parametrize("use_m", [True, False]) def test_mqa_v2(dim, dim_out, use_m): mqa = MultiQueryAttentionV2(dim, dim_out) x = torch.randn(1, dim, 32, 48) if use_m: m = torch.randn(1, dim, 16, 24) else: m = None y = mqa(x, m=m) assert (y.shape) == (1, dim_out, 32, 48) @pytest.mark.parametrize("bias", [True, False]) @pytest.mark.parametrize("expand_first", [True, False]) @pytest.mark.parametrize("head_first", [True, False]) @pytest.mark.parametrize("attn_mask", [True, False]) def test_attn2d(bias, expand_first, head_first, attn_mask): x = torch.randn(1, 128, 32, 48) attn = Attention2d( 128, 128, num_heads=4, bias=bias, expand_first=expand_first, head_first=head_first ) if attn_mask: mask = torch.randint(0, 1, size=(32 * 48, 32 * 48), dtype=torch.float32) else: mask = None o1 = attn(x, mask) attn.fused_attn = False o2 = attn(x, mask) assert torch.allclose(o1, o2, atol=1e-5), 
f"{torch.abs(o1 - o2).max()}"
pytorch-image-models/tests/test_layers.py/0
{ "file_path": "pytorch-image-models/tests/test_layers.py", "repo_id": "pytorch-image-models", "token_count": 1935 }
227
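The layer tests above revolve around timm's activation factory helpers. A minimal sketch of how they are typically called, restricted to functions the test file itself imports:

```py
# Create activation layers/functions by name via timm's factories.
import torch
from timm.layers import create_act_layer, get_act_fn

act = create_act_layer('hard_swish', inplace=True)   # returns an nn.Module
x = torch.randn(2, 8)
y = act(x)

swish_fn = get_act_fn('swish')                       # returns a plain callable
y2 = swish_fn(x)
print(y.shape, y2.shape)
```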
from abc import ABC, abstractmethod from typing import Dict, List, Optional, Union class DatasetInfo(ABC): def __init__(self): pass @abstractmethod def num_classes(self): pass @abstractmethod def label_names(self): pass @abstractmethod def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: pass @abstractmethod def index_to_label_name(self, index) -> str: pass @abstractmethod def index_to_description(self, index: int, detailed: bool = False) -> str: pass @abstractmethod def label_name_to_description(self, label: str, detailed: bool = False) -> str: pass class CustomDatasetInfo(DatasetInfo): """ DatasetInfo that wraps passed values for custom datasets.""" def __init__( self, label_names: Union[List[str], Dict[int, str]], label_descriptions: Optional[Dict[str, str]] = None ): super().__init__() assert len(label_names) > 0 self._label_names = label_names # label index => label name mapping self._label_descriptions = label_descriptions # label name => label description mapping if self._label_descriptions is not None: # validate descriptions (label names required) assert isinstance(self._label_descriptions, dict) for n in self._label_names: assert n in self._label_descriptions def num_classes(self): return len(self._label_names) def label_names(self): return self._label_names def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: return self._label_descriptions def label_name_to_description(self, label: str, detailed: bool = False) -> str: if self._label_descriptions: return self._label_descriptions[label] return label # return label name itself if a descriptions is not present def index_to_label_name(self, index) -> str: assert 0 <= index < len(self._label_names) return self._label_names[index] def index_to_description(self, index: int, detailed: bool = False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed)
pytorch-image-models/timm/data/dataset_info.py/0
{ "file_path": "pytorch-image-models/timm/data/dataset_info.py", "repo_id": "pytorch-image-models", "token_count": 941 }
228
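A short usage sketch for `CustomDatasetInfo` as defined above; the label names and descriptions are made-up placeholders.

```py
# Wrap custom label metadata with CustomDatasetInfo.
from timm.data.dataset_info import CustomDatasetInfo

info = CustomDatasetInfo(
    label_names=['cat', 'dog', 'bird'],
    label_descriptions={'cat': 'domestic cat', 'dog': 'domestic dog', 'bird': 'small bird'},
)

print(info.num_classes())               # 3
print(info.index_to_label_name(1))      # 'dog'
print(info.index_to_description(1))     # 'domestic dog'
```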
""" Dataset reader that wraps Hugging Face datasets Hacked together by / Copyright 2022 Ross Wightman """ import io import math from typing import Optional import torch import torch.distributed as dist from PIL import Image try: import datasets except ImportError as e: print("Please install Hugging Face datasets package `pip install datasets`.") raise e from .class_map import load_class_map from .reader import Reader def get_class_labels(info, label_key='label'): if 'label' not in info.features: return {} class_label = info.features[label_key] class_to_idx = {n: class_label.str2int(n) for n in class_label.names} return class_to_idx class ReaderHfds(Reader): def __init__( self, name: str, root: Optional[str] = None, split: str = 'train', class_map: dict = None, input_key: str = 'image', target_key: str = 'label', download: bool = False, trust_remote_code: bool = False ): """ """ super().__init__() self.root = root self.split = split self.dataset = datasets.load_dataset( name, # 'name' maps to path arg in hf datasets split=split, cache_dir=self.root, # timm doesn't expect hidden cache dir for datasets, specify a path if root set trust_remote_code=trust_remote_code ) # leave decode for caller, plus we want easy access to original path names... self.dataset = self.dataset.cast_column(input_key, datasets.Image(decode=False)) self.image_key = input_key self.label_key = target_key self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = get_class_labels(self.dataset.info, self.label_key) self.split_info = self.dataset.info.splits[split] self.num_samples = self.split_info.num_examples def __getitem__(self, index): item = self.dataset[index] image = item[self.image_key] if 'bytes' in image and image['bytes']: image = io.BytesIO(image['bytes']) else: assert 'path' in image and image['path'] image = open(image['path'], 'rb') label = item[self.label_key] if self.remap_class: label = self.class_to_idx[label] return image, label def __len__(self): return len(self.dataset) def _filename(self, index, basename=False, absolute=False): item = self.dataset[index] return item[self.image_key]['path']
pytorch-image-models/timm/data/readers/reader_hfds.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_hfds.py", "repo_id": "pytorch-image-models", "token_count": 1197 }
229
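A sketch of constructing `ReaderHfds` directly. The dataset name and column keys below are placeholder assumptions; any Hugging Face image-classification dataset with image and label columns should follow the same pattern.

```py
# Read samples straight from a Hugging Face hub dataset.
from timm.data.readers.reader_hfds import ReaderHfds

reader = ReaderHfds(
    name='cifar10',          # hypothetical choice of hub dataset
    split='train',
    input_key='img',         # assumed image column name for this dataset
    target_key='label',
)

image_file, label = reader[0]   # file-like object with encoded image bytes, int label
print(len(reader), label)
```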
""" PyTorch selectable adaptive pooling Adaptive pooling with the ability to select the type of pooling from: * 'avg' - Average pooling * 'max' - Max pooling * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim Both a functional and a nn.Module version of the pooling is provided. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from .format import get_spatial_dim, get_channel_dim _int_tuple_2_t = Union[int, Tuple[int, int]] def adaptive_pool_feat_mult(pool_type='avg'): if pool_type.endswith('catavgmax'): return 2 else: return 1 def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return 0.5 * (x_avg + x_max) def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return torch.cat((x_avg, x_max), 1) def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1): """Selectable global pooling function with dynamic input kernel size """ if pool_type == 'avg': x = F.adaptive_avg_pool2d(x, output_size) elif pool_type == 'avgmax': x = adaptive_avgmax_pool2d(x, output_size) elif pool_type == 'catavgmax': x = adaptive_catavgmax_pool2d(x, output_size) elif pool_type == 'max': x = F.adaptive_max_pool2d(x, output_size) else: assert False, 'Invalid pool type: %s' % pool_type return x class FastAdaptiveAvgPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: F = 'NCHW'): super(FastAdaptiveAvgPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.mean(self.dim, keepdim=not self.flatten) class FastAdaptiveMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.amax(self.dim, keepdim=not self.flatten) class FastAdaptiveAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveAvgMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim, keepdim=not self.flatten) x_max = x.amax(self.dim, keepdim=not self.flatten) return 0.5 * x_avg + 0.5 * x_max class FastAdaptiveCatAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveCatAvgMaxPool, self).__init__() self.flatten = flatten self.dim_reduce = get_spatial_dim(input_fmt) if flatten: self.dim_cat = 1 else: self.dim_cat = get_channel_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten) x_max = x.amax(self.dim_reduce, keepdim=not self.flatten) return torch.cat((x_avg, x_max), self.dim_cat) class AdaptiveAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_avgmax_pool2d(x, self.output_size) class AdaptiveCatAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveCatAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_catavgmax_pool2d(x, self.output_size) 
class SelectAdaptivePool2d(nn.Module): """Selectable global pooling layer with dynamic input kernel size """ def __init__( self, output_size: _int_tuple_2_t = 1, pool_type: str = 'fast', flatten: bool = False, input_fmt: str = 'NCHW', ): super(SelectAdaptivePool2d, self).__init__() assert input_fmt in ('NCHW', 'NHWC') self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing pool_type = pool_type.lower() if not pool_type: self.pool = nn.Identity() # pass through self.flatten = nn.Flatten(1) if flatten else nn.Identity() elif pool_type.startswith('fast') or input_fmt != 'NCHW': assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.' if pool_type.endswith('catavgmax'): self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('avgmax'): self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('max'): self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt) elif pool_type == 'fast' or pool_type.endswith('avg'): self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt) else: assert False, 'Invalid pool type: %s' % pool_type self.flatten = nn.Identity() else: assert input_fmt == 'NCHW' if pool_type == 'avgmax': self.pool = AdaptiveAvgMaxPool2d(output_size) elif pool_type == 'catavgmax': self.pool = AdaptiveCatAvgMaxPool2d(output_size) elif pool_type == 'max': self.pool = nn.AdaptiveMaxPool2d(output_size) elif pool_type == 'avg': self.pool = nn.AdaptiveAvgPool2d(output_size) else: assert False, 'Invalid pool type: %s' % pool_type self.flatten = nn.Flatten(1) if flatten else nn.Identity() def is_identity(self): return not self.pool_type def forward(self, x): x = self.pool(x) x = self.flatten(x) return x def feat_mult(self): return adaptive_pool_feat_mult(self.pool_type) def __repr__(self): return self.__class__.__name__ + '(' \ + 'pool_type=' + self.pool_type \ + ', flatten=' + str(self.flatten) + ')'
pytorch-image-models/timm/layers/adaptive_avgmax_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/adaptive_avgmax_pool.py", "repo_id": "pytorch-image-models", "token_count": 3039 }
230
""" Norm Layer Factory Create norm modules by string (to mirror create_act and creat_norm-act fns) Copyright 2022 Ross Wightman """ import functools import types from typing import Type import torch.nn as nn from .norm import ( GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, LayerNormFp32, LayerNorm2dFp32, RmsNorm, RmsNorm2d, RmsNormFp32, RmsNorm2dFp32, SimpleNorm, SimpleNorm2d, SimpleNormFp32, SimpleNorm2dFp32, ) from torchvision.ops.misc import FrozenBatchNorm2d _NORM_MAP = dict( batchnorm=nn.BatchNorm2d, batchnorm2d=nn.BatchNorm2d, batchnorm1d=nn.BatchNorm1d, groupnorm=GroupNorm, groupnorm1=GroupNorm1, layernorm=LayerNorm, layernorm2d=LayerNorm2d, layernormfp32=LayerNormFp32, layernorm2dfp32=LayerNorm2dFp32, rmsnorm=RmsNorm, rmsnorm2d=RmsNorm2d, rmsnormfp32=RmsNormFp32, rmsnorm2dfp32=RmsNorm2dFp32, simplenorm=SimpleNorm, simplenorm2d=SimpleNorm2d, simplenormfp32=SimpleNormFp32, simplenorm2dfp32=SimpleNorm2dFp32, frozenbatchnorm2d=FrozenBatchNorm2d, ) _NORM_TYPES = {m for n, m in _NORM_MAP.items()} def create_norm_layer(layer_name, num_features, **kwargs): layer = get_norm_layer(layer_name) layer_instance = layer(num_features, **kwargs) return layer_instance def get_norm_layer(norm_layer): if norm_layer is None: return None assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) norm_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): if not norm_layer: return None layer_name = norm_layer.replace('_', '').lower() norm_layer = _NORM_MAP[layer_name] else: norm_layer = norm_layer if norm_kwargs: norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args return norm_layer
pytorch-image-models/timm/layers/create_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/create_norm.py", "repo_id": "pytorch-image-models", "token_count": 902 }
231
""" Interpolation helpers for timm layers RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations Copyright Shane Barratt, Apache 2.0 license """ import torch from itertools import product class RegularGridInterpolator: """ Interpolate data defined on a rectilinear grid with even or uneven spacing. Produces similar results to scipy RegularGridInterpolator or interp2d in 'linear' mode. Taken from https://github.com/sbarratt/torch_interpolations """ def __init__(self, points, values): self.points = points self.values = values assert isinstance(self.points, tuple) or isinstance(self.points, list) assert isinstance(self.values, torch.Tensor) self.ms = list(self.values.shape) self.n = len(self.points) assert len(self.ms) == self.n for i, p in enumerate(self.points): assert isinstance(p, torch.Tensor) assert p.shape[0] == self.values.shape[i] def __call__(self, points_to_interp): assert self.points is not None assert self.values is not None assert len(points_to_interp) == len(self.points) K = points_to_interp[0].shape[0] for x in points_to_interp: assert x.shape[0] == K idxs = [] dists = [] overalls = [] for p, x in zip(self.points, points_to_interp): idx_right = torch.bucketize(x, p) idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1 idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1) dist_left = x - p[idx_left] dist_right = p[idx_right] - x dist_left[dist_left < 0] = 0. dist_right[dist_right < 0] = 0. both_zero = (dist_left == 0) & (dist_right == 0) dist_left[both_zero] = dist_right[both_zero] = 1. idxs.append((idx_left, idx_right)) dists.append((dist_left, dist_right)) overalls.append(dist_left + dist_right) numerator = 0. for indexer in product([0, 1], repeat=self.n): as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)] bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)] numerator += self.values[as_s] * \ torch.prod(torch.stack(bs_s), dim=0) denominator = torch.prod(torch.stack(overalls), dim=0) return numerator / denominator
pytorch-image-models/timm/layers/interpolate.py/0
{ "file_path": "pytorch-image-models/timm/layers/interpolate.py", "repo_id": "pytorch-image-models", "token_count": 1121 }
232
""" Position Embedding Utilities Hacked together by / Copyright 2022 Ross Wightman """ import logging import math from typing import List, Tuple, Optional, Union import torch import torch.nn.functional as F from ._fx import register_notrace_function _logger = logging.getLogger(__name__) @torch.fx.wrap @register_notrace_function def resample_abs_pos_embed( posemb: torch.Tensor, new_size: List[int], old_size: Optional[List[int]] = None, num_prefix_tokens: int = 1, interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): # sort out sizes, assume square if old size not provided num_pos_tokens = posemb.shape[1] num_new_tokens = new_size[0] * new_size[1] + num_prefix_tokens if num_new_tokens == num_pos_tokens and new_size[0] == new_size[1]: return posemb if old_size is None: hw = int(math.sqrt(num_pos_tokens - num_prefix_tokens)) old_size = hw, hw if num_prefix_tokens: posemb_prefix, posemb = posemb[:, :num_prefix_tokens], posemb[:, num_prefix_tokens:] else: posemb_prefix, posemb = None, posemb # do the interpolation embed_dim = posemb.shape[-1] orig_dtype = posemb.dtype posemb = posemb.float() # interpolate needs float32 posemb = posemb.reshape(1, old_size[0], old_size[1], -1).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).reshape(1, -1, embed_dim) posemb = posemb.to(orig_dtype) # add back extra (class, etc) prefix tokens if posemb_prefix is not None: posemb = torch.cat([posemb_prefix, posemb], dim=1) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {old_size} to {new_size}.') return posemb @torch.fx.wrap @register_notrace_function def resample_abs_pos_embed_nhwc( posemb: torch.Tensor, new_size: List[int], interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): if new_size[0] == posemb.shape[-3] and new_size[1] == posemb.shape[-2]: return posemb orig_dtype = posemb.dtype posemb = posemb.float() posemb = posemb.reshape(1, posemb.shape[-3], posemb.shape[-2], posemb.shape[-1]).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).to(orig_dtype) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {posemb.shape[-3:-1]} to {new_size}.') return posemb
pytorch-image-models/timm/layers/pos_embed.py/0
{ "file_path": "pytorch-image-models/timm/layers/pos_embed.py", "repo_id": "pytorch-image-models", "token_count": 1160 }
233
""" Binary Cross Entropy w/ a few extras Hacked together by / Copyright 2021 Ross Wightman """ from typing import Optional, Union import torch import torch.nn as nn import torch.nn.functional as F class BinaryCrossEntropy(nn.Module): """ BCE with optional one-hot from dense targets, label smoothing, thresholding NOTE for experiments comparing CE to BCE /w label smoothing, may remove """ def __init__( self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None, reduction: str = 'mean', sum_classes: bool = False, pos_weight: Optional[Union[torch.Tensor, float]] = None, ): super(BinaryCrossEntropy, self).__init__() assert 0. <= smoothing < 1.0 if pos_weight is not None: if not isinstance(pos_weight, torch.Tensor): pos_weight = torch.tensor(pos_weight) self.smoothing = smoothing self.target_threshold = target_threshold self.reduction = 'none' if sum_classes else reduction self.sum_classes = sum_classes self.register_buffer('weight', weight) self.register_buffer('pos_weight', pos_weight) def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: batch_size = x.shape[0] assert batch_size == target.shape[0] if target.shape != x.shape: # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse num_classes = x.shape[-1] # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ off_value = self.smoothing / num_classes on_value = 1. - self.smoothing + off_value target = target.long().view(-1, 1) target = torch.full( (batch_size, num_classes), off_value, device=x.device, dtype=x.dtype).scatter_(1, target, on_value) if self.target_threshold is not None: # Make target 0, or 1 if threshold set target = target.gt(self.target_threshold).to(dtype=target.dtype) loss = F.binary_cross_entropy_with_logits( x, target, self.weight, pos_weight=self.pos_weight, reduction=self.reduction, ) if self.sum_classes: loss = loss.sum(-1).mean() return loss
pytorch-image-models/timm/loss/binary_cross_entropy.py/0
{ "file_path": "pytorch-image-models/timm/loss/binary_cross_entropy.py", "repo_id": "pytorch-image-models", "token_count": 1082 }
234
""" DeiT - Data-efficient Image Transformers DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118 Modifications copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. from functools import partial from typing import Optional import torch from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import resample_abs_pos_embed from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['VisionTransformerDistilled'] # model_registry will add each entrypoint fn to this class VisionTransformerDistilled(VisionTransformer): """ Vision Transformer w/ Distillation Token and Head Distillation token & head support for `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 """ def __init__(self, *args, **kwargs): weight_init = kwargs.pop('weight_init', '') super().__init__(*args, **kwargs, weight_init='skip') assert self.global_pool in ('token',) self.num_prefix_tokens = 2 self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.pos_embed = nn.Parameter( torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity() self.distilled_training = False # must set this True to train w/ distillation token self.init_weights(weight_init) def init_weights(self, mode=''): trunc_normal_(self.dist_token, std=.02) super().init_weights(mode=mode) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed|dist_token', blocks=[ (r'^blocks\.(\d+)', None), (r'^norm', (99999,))] # final norm w/ last block ) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head, self.head_dist def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def _pos_embed(self, x): if self.dynamic_img_size: B, H, W, C = x.shape prev_grid_size = self.patch_embed.grid_size pos_embed = resample_abs_pos_embed( self.pos_embed, new_size=(H, W), old_size=prev_grid_size, num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens, ) x = x.view(B, -1, C) else: pos_embed = self.pos_embed if self.no_embed_class: # deit-3, updated JAX (big vision) # position embedding does not overlap with class token, add then concat x = x + pos_embed x = torch.cat(( self.cls_token.expand(x.shape[0], -1, -1), self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) else: # original timm, JAX, and deit vit impl # pos_embed has entry for class token, concat then add x = torch.cat(( self.cls_token.expand(x.shape[0], -1, -1), self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) x = x + pos_embed return self.pos_drop(x) def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: x, x_dist = x[:, 0], x[:, 1] if 
pre_logits: return (x + x_dist) / 2 x = self.head(x) x_dist = self.head_dist(x_dist) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train / finetune, inference average the classifier predictions return (x + x_dist) / 2 def _create_deit(variant, pretrained=False, distilled=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model_cls = VisionTransformerDistilled if distilled else VisionTransformer model = build_model_with_cfg( model_cls, variant, pretrained, pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True), feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # deit models (FB weights) 'deit_tiny_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'), 'deit_small_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'), 'deit_base_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'), 'deit_base_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit_tiny_distilled_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', classifier=('head', 'head_dist')), 'deit_small_distilled_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', classifier=('head', 'head_dist')), 'deit_base_distilled_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', classifier=('head', 'head_dist')), 'deit_base_distilled_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', input_size=(3, 384, 384), crop_pct=1.0, classifier=('head', 'head_dist')), 'deit3_small_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'), 'deit3_small_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_medium_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'), 'deit3_base_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'), 'deit3_base_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_large_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'), 'deit3_large_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth', 
input_size=(3, 384, 384), crop_pct=1.0), 'deit3_huge_patch14_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'), 'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth', crop_pct=1.0), 'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth', crop_pct=1.0), 'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth', crop_pct=1.0), 'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth', crop_pct=1.0), 'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth', crop_pct=1.0), }) @register_model def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. 
""" model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_deit( 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_deit( 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit( 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit( 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 medium model @ 224x224 (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. 
""" model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k', 'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k', 'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k', 'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k', 'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k', 'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k', 'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k', 'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k' })
pytorch-image-models/timm/models/deit.py/0
{ "file_path": "pytorch-image-models/timm/models/deit.py", "repo_id": "pytorch-image-models", "token_count": 8370 }
235
""" MambaOut models for image classification. Some implementations are modified from: timm (https://github.com/rwightman/pytorch-image-models), MetaFormer (https://github.com/sail-sg/metaformer), InceptionNeXt (https://github.com/sail-sg/inceptionnext) """ from collections import OrderedDict from typing import List, Optional, Tuple, Union import torch from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead, get_act_layer from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs class Stem(nn.Module): r""" Code modified from InternImage: https://github.com/OpenGVLab/InternImage """ def __init__( self, in_chs=3, out_chs=96, mid_norm: bool = True, act_layer=nn.GELU, norm_layer=LayerNorm, ): super().__init__() self.conv1 = nn.Conv2d( in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1 ) self.norm1 = norm_layer(out_chs // 2) if mid_norm else None self.act = act_layer() self.conv2 = nn.Conv2d( out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1 ) self.norm2 = norm_layer(out_chs) def forward(self, x): x = self.conv1(x) if self.norm1 is not None: x = x.permute(0, 2, 3, 1) x = self.norm1(x) x = x.permute(0, 3, 1, 2) x = self.act(x) x = self.conv2(x) x = x.permute(0, 2, 3, 1) x = self.norm2(x) return x class DownsampleNormFirst(nn.Module): def __init__( self, in_chs=96, out_chs=198, norm_layer=LayerNorm, ): super().__init__() self.norm = norm_layer(in_chs) self.conv = nn.Conv2d( in_chs, out_chs, kernel_size=3, stride=2, padding=1 ) def forward(self, x): x = self.norm(x) x = x.permute(0, 3, 1, 2) x = self.conv(x) x = x.permute(0, 2, 3, 1) return x class Downsample(nn.Module): def __init__( self, in_chs=96, out_chs=198, norm_layer=LayerNorm, ): super().__init__() self.conv = nn.Conv2d( in_chs, out_chs, kernel_size=3, stride=2, padding=1 ) self.norm = norm_layer(out_chs) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.conv(x) x = x.permute(0, 2, 3, 1) x = self.norm(x) return x class MlpHead(nn.Module): """ MLP classification head """ def __init__( self, in_features, num_classes=1000, pool_type='avg', act_layer=nn.GELU, mlp_ratio=4, norm_layer=LayerNorm, drop_rate=0., bias=True, ): super().__init__() if mlp_ratio is not None: hidden_size = int(mlp_ratio * in_features) else: hidden_size = None self.pool_type = pool_type self.in_features = in_features self.hidden_size = hidden_size or in_features self.norm = norm_layer(in_features) if hidden_size: self.pre_logits = nn.Sequential(OrderedDict([ ('fc', nn.Linear(in_features, hidden_size)), ('act', act_layer()), ('norm', norm_layer(hidden_size)) ])) self.num_features = hidden_size else: self.num_features = in_features self.pre_logits = nn.Identity() self.fc = nn.Linear(self.num_features, num_classes, bias=bias) if num_classes > 0 else nn.Identity() self.head_dropout = nn.Dropout(drop_rate) def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False): if pool_type is not None: self.pool_type = pool_type if reset_other: self.norm = nn.Identity() self.pre_logits = nn.Identity() self.num_features = self.in_features self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x, pre_logits: bool = False): if self.pool_type == 'avg': x = x.mean((1, 2)) x = self.norm(x) x = self.pre_logits(x) x = 
self.head_dropout(x) if pre_logits: return x x = self.fc(x) return x class GatedConvBlock(nn.Module): r""" Our implementation of Gated CNN Block: https://arxiv.org/pdf/1612.08083 Args: conv_ratio: control the number of channels to conduct depthwise convolution. Conduct convolution on partial channels can improve paraitcal efficiency. The idea of partial channels is from ShuffleNet V2 (https://arxiv.org/abs/1807.11164) and also used by InceptionNeXt (https://arxiv.org/abs/2303.16900) and FasterNet (https://arxiv.org/abs/2303.03667) """ def __init__( self, dim, expansion_ratio=8 / 3, kernel_size=7, conv_ratio=1.0, ls_init_value=None, norm_layer=LayerNorm, act_layer=nn.GELU, drop_path=0., **kwargs ): super().__init__() self.norm = norm_layer(dim) hidden = int(expansion_ratio * dim) self.fc1 = nn.Linear(dim, hidden * 2) self.act = act_layer() conv_channels = int(conv_ratio * dim) self.split_indices = (hidden, hidden - conv_channels, conv_channels) self.conv = nn.Conv2d( conv_channels, conv_channels, kernel_size=kernel_size, padding=kernel_size // 2, groups=conv_channels ) self.fc2 = nn.Linear(hidden, dim) self.ls = LayerScale(dim) if ls_init_value is not None else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x # [B, H, W, C] x = self.norm(x) x = self.fc1(x) g, i, c = torch.split(x, self.split_indices, dim=-1) c = c.permute(0, 3, 1, 2) # [B, H, W, C] -> [B, C, H, W] c = self.conv(c) c = c.permute(0, 2, 3, 1) # [B, C, H, W] -> [B, H, W, C] x = self.fc2(self.act(g) * torch.cat((i, c), dim=-1)) x = self.ls(x) x = self.drop_path(x) return x + shortcut class MambaOutStage(nn.Module): def __init__( self, dim, dim_out: Optional[int] = None, depth: int = 4, expansion_ratio=8 / 3, kernel_size=7, conv_ratio=1.0, downsample: str = '', ls_init_value: Optional[float] = None, norm_layer=LayerNorm, act_layer=nn.GELU, drop_path=0., ): super().__init__() dim_out = dim_out or dim self.grad_checkpointing = False if downsample == 'conv': self.downsample = Downsample(dim, dim_out, norm_layer=norm_layer) elif downsample == 'conv_nf': self.downsample = DownsampleNormFirst(dim, dim_out, norm_layer=norm_layer) else: assert dim == dim_out self.downsample = nn.Identity() self.blocks = nn.Sequential(*[ GatedConvBlock( dim=dim_out, expansion_ratio=expansion_ratio, kernel_size=kernel_size, conv_ratio=conv_ratio, ls_init_value=ls_init_value, norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path[j] if isinstance(drop_path, (list, tuple)) else drop_path, ) for j in range(depth) ]) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class MambaOut(nn.Module): r""" MetaFormer A PyTorch impl of : `MetaFormer Baselines for Vision` - https://arxiv.org/abs/2210.13452 Args: in_chans (int): Number of input image channels. Default: 3. num_classes (int): Number of classes for classification head. Default: 1000. depths (list or tuple): Number of blocks at each stage. Default: [3, 3, 9, 3]. dims (int): Feature dimension at each stage. Default: [96, 192, 384, 576]. downsample_layers: (list or tuple): Downsampling layers before each stage. drop_path_rate (float): Stochastic depth rate. Default: 0. output_norm: norm before classifier head. Default: partial(nn.LayerNorm, eps=1e-6). head_fn: classification head. Default: nn.Linear. head_dropout (float): dropout for MLP classifier. Default: 0. 
""" def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', depths=(3, 3, 9, 3), dims=(96, 192, 384, 576), norm_layer=LayerNorm, act_layer=nn.GELU, conv_ratio=1.0, expansion_ratio=8/3, kernel_size=7, stem_mid_norm=True, ls_init_value=None, downsample='conv', drop_path_rate=0., drop_rate=0., head_fn='default', ): super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.output_fmt = 'NHWC' if not isinstance(depths, (list, tuple)): depths = [depths] # it means the model has only one stage if not isinstance(dims, (list, tuple)): dims = [dims] act_layer = get_act_layer(act_layer) num_stage = len(depths) self.num_stage = num_stage self.feature_info = [] self.stem = Stem( in_chans, dims[0], mid_norm=stem_mid_norm, act_layer=act_layer, norm_layer=norm_layer, ) prev_dim = dims[0] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] cur = 0 curr_stride = 4 self.stages = nn.Sequential() for i in range(num_stage): dim = dims[i] stride = 2 if curr_stride == 2 or i > 0 else 1 curr_stride *= stride stage = MambaOutStage( dim=prev_dim, dim_out=dim, depth=depths[i], kernel_size=kernel_size, conv_ratio=conv_ratio, expansion_ratio=expansion_ratio, downsample=downsample if i > 0 else '', ls_init_value=ls_init_value, norm_layer=norm_layer, act_layer=act_layer, drop_path=dp_rates[i], ) self.stages.append(stage) prev_dim = dim # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 self.feature_info += [dict(num_chs=prev_dim, reduction=curr_stride, module=f'stages.{i}')] cur += depths[i] if head_fn == 'default': # specific to this model, unusual norm -> pool -> fc -> act -> norm -> fc combo self.head = MlpHead( prev_dim, num_classes, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, ) else: # more typical norm -> pool -> fc -> act -> fc self.head = ClNormMlpClassifierHead( prev_dim, num_classes, hidden_size=int(prev_dim * 4), pool_type=global_pool, norm_layer=norm_layer, drop_rate=drop_rate, ) self.num_features = prev_dim self.head_hidden_size = self.head.num_features self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. 
Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW or NHWC.' channel_first = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: intermediates.append(x) if channel_first: # reshape to BCHW output format intermediates = [y.permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] if 'stem.conv1.weight' in state_dict: return state_dict import re out_dict = {} for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k) # remap head names if k.startswith('norm.'): # this is moving to head since it's after the pooling k = k.replace('norm.', 'head.norm.') elif k.startswith('head.'): k = k.replace('head.fc1.', 'head.pre_logits.fc.') k = k.replace('head.norm.', 'head.pre_logits.norm.') k = k.replace('head.fc2.', 'head.fc.') out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'test_input_size': (3, 288, 288), 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # original weights 'mambaout_femto.in1k': _cfg( hf_hub_id='timm/'), 'mambaout_kobe.in1k': _cfg( hf_hub_id='timm/'), 'mambaout_tiny.in1k': _cfg( hf_hub_id='timm/'), 'mambaout_small.in1k': _cfg( hf_hub_id='timm/'), 'mambaout_base.in1k': _cfg( hf_hub_id='timm/'), # timm experiments below 'mambaout_small_rw.sw_e450_in1k': _cfg( hf_hub_id='timm/', ), 'mambaout_base_short_rw.sw_e500_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, ), 'mambaout_base_tall_rw.sw_e500_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, ), 'mambaout_base_wide_rw.sw_e500_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, ), 'mambaout_base_plus_rw.sw_e150_in12k_ft_in1k': _cfg( 
hf_hub_id='timm/', ), 'mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), test_input_size=(3, 384, 384), crop_mode='squash', pool_size=(12, 12), ), 'mambaout_base_plus_rw.sw_e150_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, ), 'test_mambaout': _cfg(input_size=(3, 160, 160), test_input_size=(3, 192, 192), pool_size=(5, 5)), }) def _create_mambaout(variant, pretrained=False, **kwargs): model = build_model_with_cfg( MambaOut, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs, ) return model # a series of MambaOut models @register_model def mambaout_femto(pretrained=False, **kwargs): model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 192, 288)) return _create_mambaout('mambaout_femto', pretrained=pretrained, **dict(model_args, **kwargs)) # Kobe Memorial Version with 24 Gated CNN blocks @register_model def mambaout_kobe(pretrained=False, **kwargs): model_args = dict(depths=[3, 3, 15, 3], dims=[48, 96, 192, 288]) return _create_mambaout('mambaout_kobe', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_tiny(pretrained=False, **kwargs): model_args = dict(depths=[3, 3, 9, 3], dims=[96, 192, 384, 576]) return _create_mambaout('mambaout_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_small(pretrained=False, **kwargs): model_args = dict(depths=[3, 4, 27, 3], dims=[96, 192, 384, 576]) return _create_mambaout('mambaout_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base(pretrained=False, **kwargs): model_args = dict(depths=[3, 4, 27, 3], dims=[128, 256, 512, 768]) return _create_mambaout('mambaout_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_small_rw(pretrained=False, **kwargs): model_args = dict( depths=[3, 4, 27, 3], dims=[96, 192, 384, 576], stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-6, head_fn='norm_mlp', ) return _create_mambaout('mambaout_small_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base_short_rw(pretrained=False, **kwargs): model_args = dict( depths=(3, 3, 25, 3), dims=(128, 256, 512, 768), expansion_ratio=3.0, conv_ratio=1.25, stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-6, head_fn='norm_mlp', ) return _create_mambaout('mambaout_base_short_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base_tall_rw(pretrained=False, **kwargs): model_args = dict( depths=(3, 4, 30, 3), dims=(128, 256, 512, 768), expansion_ratio=2.5, conv_ratio=1.25, stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-6, head_fn='norm_mlp', ) return _create_mambaout('mambaout_base_tall_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base_wide_rw(pretrained=False, **kwargs): model_args = dict( depths=(3, 4, 27, 3), dims=(128, 256, 512, 768), expansion_ratio=3.0, conv_ratio=1.5, stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-6, act_layer='silu', head_fn='norm_mlp', ) return _create_mambaout('mambaout_base_wide_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def mambaout_base_plus_rw(pretrained=False, **kwargs): model_args = dict( depths=(3, 4, 30, 3), dims=(128, 256, 512, 768), expansion_ratio=3.0, conv_ratio=1.5, stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-6, act_layer='silu', head_fn='norm_mlp', ) return 
_create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def test_mambaout(pretrained=False, **kwargs): model_args = dict( depths=(1, 1, 3, 1), dims=(16, 32, 48, 64), expansion_ratio=3, stem_mid_norm=False, downsample='conv_nf', ls_init_value=1e-4, act_layer='silu', head_fn='norm_mlp', ) return _create_mambaout('test_mambaout', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/mambaout.py/0
{ "file_path": "pytorch-image-models/timm/models/mambaout.py", "repo_id": "pytorch-image-models", "token_count": 11683 }
236
""" RDNet Copyright (c) 2024-present NAVER Cloud Corp. Apache-2.0 """ from functools import partial from typing import List, Optional, Tuple, Union, Callable import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, NormMlpClassifierHead, ClassifierHead, EffectiveSEModule, \ make_divisible, get_act_layer, get_norm_layer from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ["RDNet"] class Block(nn.Module): def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer): super().__init__() self.layers = nn.Sequential( nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3), norm_layer(in_chs), nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0), act_layer(), nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0), ) def forward(self, x): return self.layers(x) class BlockESE(nn.Module): def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer): super().__init__() self.layers = nn.Sequential( nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3), norm_layer(in_chs), nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0), act_layer(), nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0), EffectiveSEModule(out_chs), ) def forward(self, x): return self.layers(x) def _get_block_type(block: str): block = block.lower().strip() if block == "block": return Block elif block == "blockese": return BlockESE else: assert False, f"Unknown block type ({block})." class DenseBlock(nn.Module): def __init__( self, num_input_features: int = 64, growth_rate: int = 64, bottleneck_width_ratio: float = 4.0, drop_path_rate: float = 0.0, drop_rate: float = 0.0, rand_gather_step_prob: float = 0.0, block_idx: int = 0, block_type: str = "Block", ls_init_value: float = 1e-6, norm_layer: str = "layernorm2d", act_layer: str = "gelu", ): super().__init__() self.drop_rate = drop_rate self.drop_path_rate = drop_path_rate self.rand_gather_step_prob = rand_gather_step_prob self.block_idx = block_idx self.growth_rate = growth_rate self.gamma = nn.Parameter(ls_init_value * torch.ones(growth_rate)) if ls_init_value > 0 else None growth_rate = int(growth_rate) inter_chs = int(num_input_features * bottleneck_width_ratio / 8) * 8 self.drop_path = DropPath(drop_path_rate) self.layers = _get_block_type(block_type)( in_chs=num_input_features, inter_chs=inter_chs, out_chs=growth_rate, norm_layer=norm_layer, act_layer=act_layer, ) def forward(self, x: List[torch.Tensor]) -> torch.Tensor: x = torch.cat(x, 1) x = self.layers(x) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) return x class DenseStage(nn.Sequential): def __init__(self, num_block, num_input_features, drop_path_rates, growth_rate, **kwargs): super().__init__() for i in range(num_block): layer = DenseBlock( num_input_features=num_input_features, growth_rate=growth_rate, drop_path_rate=drop_path_rates[i], block_idx=i, **kwargs, ) num_input_features += growth_rate self.add_module(f"dense_block{i}", layer) self.num_out_features = num_input_features def forward(self, init_feature: torch.Tensor) -> torch.Tensor: features = [init_feature] for module in self: new_feature = module(features) features.append(new_feature) return torch.cat(features, 1) class RDNet(nn.Module): def __init__( self, in_chans: int = 3, # timm option 
[--in-chans] num_classes: int = 1000, # timm option [--num-classes] global_pool: str = 'avg', # timm option [--gp] growth_rates: Union[List[int], Tuple[int]] = (64, 104, 128, 128, 128, 128, 224), num_blocks_list: Union[List[int], Tuple[int]] = (3, 3, 3, 3, 3, 3, 3), block_type: Union[List[int], Tuple[int]] = ("Block",) * 2 + ("BlockESE",) * 5, is_downsample_block: Union[List[bool], Tuple[bool]] = (None, True, True, False, False, False, True), bottleneck_width_ratio: float = 4.0, transition_compression_ratio: float = 0.5, ls_init_value: float = 1e-6, stem_type: str = 'patch', patch_size: int = 4, num_init_features: int = 64, head_init_scale: float = 1., head_norm_first: bool = False, conv_bias: bool = True, act_layer: Union[str, Callable] = 'gelu', norm_layer: str = "layernorm2d", norm_eps: Optional[float] = None, drop_rate: float = 0.0, # timm option [--drop: dropout ratio] drop_path_rate: float = 0.0, # timm option [--drop-path: drop-path ratio] ): """ Args: in_chans: Number of input image channels. num_classes: Number of classes for classification head. global_pool: Global pooling type. growth_rates: Growth rate at each stage. num_blocks_list: Number of blocks at each stage. is_downsample_block: Whether to downsample at each stage. bottleneck_width_ratio: Bottleneck width ratio (similar to mlp expansion ratio). transition_compression_ratio: Channel compression ratio of transition layers. ls_init_value: Init value for Layer Scale, disabled if None. stem_type: Type of stem. patch_size: Stem patch size for patch stem. num_init_features: Number of features of stem. head_init_scale: Init scaling value for classifier weights and biases. head_norm_first: Apply normalization before global pool + head. conv_bias: Use bias layers w/ all convolutions. act_layer: Activation layer type. norm_layer: Normalization layer type. norm_eps: Small value to avoid division by zero in normalization. drop_rate: Head pre-classifier dropout rate. drop_path_rate: Stochastic depth drop rate. 
""" super().__init__() assert len(growth_rates) == len(num_blocks_list) == len(is_downsample_block) act_layer = get_act_layer(act_layer) norm_layer = get_norm_layer(norm_layer) if norm_eps is not None: norm_layer = partial(norm_layer, eps=norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate # stem assert stem_type in ('patch', 'overlap', 'overlap_tiered') if stem_type == 'patch': # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 self.stem = nn.Sequential( nn.Conv2d(in_chans, num_init_features, kernel_size=patch_size, stride=patch_size, bias=conv_bias), norm_layer(num_init_features), ) stem_stride = patch_size else: mid_chs = make_divisible(num_init_features // 2) if 'tiered' in stem_type else num_init_features self.stem = nn.Sequential( nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.Conv2d(mid_chs, num_init_features, kernel_size=3, stride=2, padding=1, bias=conv_bias), norm_layer(num_init_features), ) stem_stride = 4 # features self.feature_info = [] self.num_stages = len(growth_rates) curr_stride = stem_stride num_features = num_init_features dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(num_blocks_list)).split(num_blocks_list)] dense_stages = [] for i in range(self.num_stages): dense_stage_layers = [] if i != 0: compressed_num_features = int(num_features * transition_compression_ratio / 8) * 8 k_size = stride = 1 if is_downsample_block[i]: curr_stride *= 2 k_size = stride = 2 dense_stage_layers.append(norm_layer(num_features)) dense_stage_layers.append( nn.Conv2d(num_features, compressed_num_features, kernel_size=k_size, stride=stride, padding=0) ) num_features = compressed_num_features stage = DenseStage( num_block=num_blocks_list[i], num_input_features=num_features, growth_rate=growth_rates[i], bottleneck_width_ratio=bottleneck_width_ratio, drop_rate=drop_rate, drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, block_type=block_type[i], norm_layer=norm_layer, act_layer=act_layer, ) dense_stage_layers.append(stage) num_features += num_blocks_list[i] * growth_rates[i] if i + 1 == self.num_stages or (i + 1 != self.num_stages and is_downsample_block[i + 1]): self.feature_info += [ dict( num_chs=num_features, reduction=curr_stride, module=f'dense_stages.{i}', growth_rate=growth_rates[i], ) ] dense_stages.append(nn.Sequential(*dense_stage_layers)) self.dense_stages = nn.Sequential(*dense_stages) self.num_features = self.head_hidden_size = num_features # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets # otherwise pool -> norm -> fc, the default RDNet ordering (pretrained NV weights) if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): assert not coarse, "coarse grouping is not implemented for RDNet" return dict( stem=r'^stem', blocks=r'^dense_stages\.(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.dense_stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, 
global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] stage_ends = [int(info['module'].split('.')[-1]) for info in self.feature_info] take_indices, max_index = feature_take_indices(len(stage_ends), indices) take_indices = [stage_ends[i] for i in take_indices] max_index = stage_ends[max_index] # forward pass x = self.stem(x) last_idx = len(self.dense_stages) - 1 if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript dense_stages = self.dense_stages else: dense_stages = self.dense_stages[:max_index + 1] for feat_idx, stage in enumerate(dense_stages): x = stage(x) if feat_idx in take_indices: if norm and feat_idx == last_idx: x_inter = self.norm_pre(x) # applying final norm to last intermediate else: x_inter = x intermediates.append(x_inter) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm_pre(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ stage_ends = [int(info['module'].split('.')[-1]) for info in self.feature_info] take_indices, max_index = feature_take_indices(len(stage_ends), indices) max_index = stage_ends[max_index] self.dense_stages = self.dense_stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm_pre = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.dense_stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight) elif isinstance(module, nn.BatchNorm2d): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) elif isinstance(module, nn.Linear): nn.init.constant_(module.bias, 0) if name and 'head.' 
in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap NV checkpoints -> timm """ if 'stem.0.weight' in state_dict: return state_dict # non-NV checkpoint if 'model' in state_dict: state_dict = state_dict['model'] out_dict = {} for k, v in state_dict.items(): k = k.replace('stem.stem.', 'stem.') out_dict[k] = v return out_dict def _create_rdnet(variant, pretrained=False, **kwargs): model = build_model_with_cfg( RDNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { "url": url, "num_classes": 1000, "input_size": (3, 224, 224), "pool_size": (7, 7), "crop_pct": 0.9, "interpolation": "bicubic", "mean": IMAGENET_DEFAULT_MEAN, "std": IMAGENET_DEFAULT_STD, "first_conv": "stem.0", "classifier": "head.fc", "paper_ids": "arXiv:2403.19588", "paper_name": "DenseNets Reloaded: Paradigm Shift Beyond ResNets and ViTs", "origin_url": "https://github.com/naver-ai/rdnet", **kwargs, } default_cfgs = generate_default_cfgs({ 'rdnet_tiny.nv_in1k': _cfg( hf_hub_id='naver-ai/rdnet_tiny.nv_in1k'), 'rdnet_small.nv_in1k': _cfg( hf_hub_id='naver-ai/rdnet_small.nv_in1k'), 'rdnet_base.nv_in1k': _cfg( hf_hub_id='naver-ai/rdnet_base.nv_in1k'), 'rdnet_large.nv_in1k': _cfg( hf_hub_id='naver-ai/rdnet_large.nv_in1k'), 'rdnet_large.nv_in1k_ft_in1k_384': _cfg( hf_hub_id='naver-ai/rdnet_large.nv_in1k_ft_in1k_384', input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), }) @register_model def rdnet_tiny(pretrained=False, **kwargs): n_layer = 7 model_args = { "num_init_features": 64, "growth_rates": [64] + [104] + [128] * 4 + [224], "num_blocks_list": [3] * n_layer, "is_downsample_block": (None, True, True, False, False, False, True), "transition_compression_ratio": 0.5, "block_type": ["Block"] + ["Block"] + ["BlockESE"] * 4 + ["BlockESE"], } model = _create_rdnet("rdnet_tiny", pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def rdnet_small(pretrained=False, **kwargs): n_layer = 11 model_args = { "num_init_features": 72, "growth_rates": [64] + [128] + [128] * (n_layer - 4) + [240] * 2, "num_blocks_list": [3] * n_layer, "is_downsample_block": (None, True, True, False, False, False, False, False, False, True, False), "transition_compression_ratio": 0.5, "block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2, } model = _create_rdnet("rdnet_small", pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def rdnet_base(pretrained=False, **kwargs): n_layer = 11 model_args = { "num_init_features": 120, "growth_rates": [96] + [128] + [168] * (n_layer - 4) + [336] * 2, "num_blocks_list": [3] * n_layer, "is_downsample_block": (None, True, True, False, False, False, False, False, False, True, False), "transition_compression_ratio": 0.5, "block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2, } model = _create_rdnet("rdnet_base", pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def rdnet_large(pretrained=False, **kwargs): n_layer = 12 model_args = { "num_init_features": 144, "growth_rates": [128] + [192] + [256] * (n_layer - 4) + [360] * 2, "num_blocks_list": [3] * n_layer, "is_downsample_block": (None, True, True, False, False, False, False, False, False, False, True, False), "transition_compression_ratio": 0.5, "block_type": ["Block"] + ["Block"] + 
["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2, } model = _create_rdnet("rdnet_large", pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/rdnet.py/0
{ "file_path": "pytorch-image-models/timm/models/rdnet.py", "repo_id": "pytorch-image-models", "token_count": 9547 }
237
"""SwiftFormer SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications Code: https://github.com/Amshaker/SwiftFormer Paper: https://arxiv.org/pdf/2303.15446 @InProceedings{Shaker_2023_ICCV, author = {Shaker, Abdelrahman and Maaz, Muhammad and Rasheed, Hanoona and Khan, Salman and Yang, Ming-Hsuan and Khan, Fahad Shahbaz}, title = {SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications}, booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, year = {2023}, } """ import re from typing import Any, Dict, List, Optional, Set, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Linear, LayerType, to_2tuple, trunc_normal_ from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['SwiftFormer'] class LayerScale2d(nn.Module): def __init__(self, dim: int, init_values: float = 1e-5, inplace: bool = False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter( init_values * torch.ones(dim, 1, 1), requires_grad=True) def forward(self, x: torch.Tensor) -> torch.Tensor: return x.mul_(self.gamma) if self.inplace else x * self.gamma class Embedding(nn.Module): """ Patch Embedding that is implemented by a layer of conv. Input: tensor in shape [B, C, H, W] Output: tensor in shape [B, C, H/stride, W/stride] """ def __init__( self, in_chans: int = 3, embed_dim: int = 768, patch_size: int = 16, stride: int = 16, padding: int = 0, norm_layer: LayerType = nn.BatchNorm2d, ): super().__init__() patch_size = to_2tuple(patch_size) stride = to_2tuple(stride) padding = to_2tuple(padding) self.proj = nn.Conv2d(in_chans, embed_dim, patch_size, stride, padding) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) x = self.norm(x) return x class ConvEncoder(nn.Module): """ Implementation of ConvEncoder with 3*3 and 1*1 convolutions. Input: tensor with shape [B, C, H, W] Output: tensor with shape [B, C, H, W] """ def __init__( self, dim: int, hidden_dim: int = 64, kernel_size: int = 3, drop_path: float = 0., act_layer: LayerType = nn.GELU, norm_layer: LayerType = nn.BatchNorm2d, use_layer_scale: bool = True, ): super().__init__() self.dwconv = nn.Conv2d(dim, dim, kernel_size, padding=kernel_size // 2, groups=dim) self.norm = norm_layer(dim) self.pwconv1 = nn.Conv2d(dim, hidden_dim, 1) self.act = act_layer() self.pwconv2 = nn.Conv2d(hidden_dim, dim, 1) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.layer_scale = LayerScale2d(dim, 1) if use_layer_scale else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: input = x x = self.dwconv(x) x = self.norm(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) x = self.layer_scale(x) x = input + self.drop_path(x) return x class Mlp(nn.Module): """ Implementation of MLP layer with 1*1 convolutions. 
Input: tensor with shape [B, C, H, W] Output: tensor with shape [B, C, H, W] """ def __init__( self, in_features: int, hidden_features: Optional[int] = None, out_features: Optional[int] = None, act_layer: LayerType = nn.GELU, norm_layer: LayerType = nn.BatchNorm2d, drop: float = 0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.norm1 = norm_layer(in_features) self.fc1 = nn.Conv2d(in_features, hidden_features, 1) self.act = act_layer() self.fc2 = nn.Conv2d(hidden_features, out_features, 1) self.drop = nn.Dropout(drop) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.norm1(x) x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class EfficientAdditiveAttention(nn.Module): """ Efficient Additive Attention module for SwiftFormer. Input: tensor in shape [B, C, H, W] Output: tensor in shape [B, C, H, W] """ def __init__(self, in_dims: int = 512, token_dim: int = 256, num_heads: int = 1): super().__init__() self.scale_factor = token_dim ** -0.5 self.to_query = nn.Linear(in_dims, token_dim * num_heads) self.to_key = nn.Linear(in_dims, token_dim * num_heads) self.w_g = nn.Parameter(torch.randn(token_dim * num_heads, 1)) self.proj = nn.Linear(token_dim * num_heads, token_dim * num_heads) self.final = nn.Linear(token_dim * num_heads, token_dim) def forward(self, x: torch.Tensor) -> torch.Tensor: B, _, H, W = x.shape x = x.flatten(2).permute(0, 2, 1) query = F.normalize(self.to_query(x), dim=-1) key = F.normalize(self.to_key(x), dim=-1) attn = F.normalize(query @ self.w_g * self.scale_factor, dim=1) attn = torch.sum(attn * query, dim=1, keepdim=True) out = self.proj(attn * key) + query out = self.final(out).permute(0, 2, 1).reshape(B, -1, H, W) return out class LocalRepresentation(nn.Module): """ Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions. Input: tensor in shape [B, C, H, W] Output: tensor in shape [B, C, H, W] """ def __init__( self, dim: int, kernel_size: int = 3, drop_path: float = 0., use_layer_scale: bool = True, act_layer: LayerType = nn.GELU, norm_layer: LayerType = nn.BatchNorm2d, ): super().__init__() self.dwconv = nn.Conv2d(dim, dim, kernel_size, padding=kernel_size // 2, groups=dim) self.norm = norm_layer(dim) self.pwconv1 = nn.Conv2d(dim, dim, kernel_size=1) self.act = act_layer() self.pwconv2 = nn.Conv2d(dim, dim, kernel_size=1) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.layer_scale = LayerScale2d(dim, 1) if use_layer_scale else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: skip = x x = self.dwconv(x) x = self.norm(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) x = self.layer_scale(x) x = skip + self.drop_path(x) return x class Block(nn.Module): """ SwiftFormer Encoder Block for SwiftFormer. It consists of : (1) Local representation module, (2) EfficientAdditiveAttention, and (3) MLP block. 
Input: tensor in shape [B, C, H, W] Output: tensor in shape [B, C, H, W] """ def __init__( self, dim: int, mlp_ratio: float = 4., drop_rate: float = 0., drop_path: float = 0., act_layer: LayerType = nn.GELU, norm_layer: LayerType = nn.BatchNorm2d, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, ): super().__init__() self.local_representation = LocalRepresentation( dim=dim, use_layer_scale=use_layer_scale, act_layer=act_layer, norm_layer=norm_layer, ) self.attn = EfficientAdditiveAttention(in_dims=dim, token_dim=dim) self.linear = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=drop_rate, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value) \ if use_layer_scale else nn.Identity() self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value) \ if use_layer_scale else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.local_representation(x) x = x + self.drop_path(self.layer_scale_1(self.attn(x))) x = x + self.drop_path(self.layer_scale_2(self.linear(x))) return x class Stage(nn.Module): """ Implementation of each SwiftFormer stages. Here, SwiftFormerEncoder used as the last block in all stages, while ConvEncoder used in the rest of the blocks. Input: tensor in shape [B, C, H, W] Output: tensor in shape [B, C, H, W] """ def __init__( self, dim: int, index: int, layers: List[int], mlp_ratio: float = 4., act_layer: LayerType = nn.GELU, norm_layer: LayerType = nn.BatchNorm2d, drop_rate: float = 0., drop_path_rate: float = 0., use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, downsample: Optional[LayerType] = None, ): super().__init__() self.grad_checkpointing = False self.downsample = downsample if downsample is not None else nn.Identity() blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) if layers[index] - block_idx <= 1: blocks.append(Block( dim, mlp_ratio=mlp_ratio, drop_rate=drop_rate, drop_path=block_dpr, act_layer=act_layer, norm_layer=norm_layer, use_layer_scale=use_layer_scale, layer_scale_init_value=layer_scale_init_value, )) else: blocks.append(ConvEncoder( dim=dim, hidden_dim=int(mlp_ratio * dim), kernel_size=3, drop_path=block_dpr, act_layer=act_layer, norm_layer=norm_layer, use_layer_scale=use_layer_scale, )) self.blocks = nn.Sequential(*blocks) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class SwiftFormer(nn.Module): def __init__( self, layers: List[int] = [3, 3, 6, 4], embed_dims: List[int] = [48, 56, 112, 220], mlp_ratios: int = 4, downsamples: List[bool] = [False, True, True, True], act_layer: LayerType = nn.GELU, down_patch_size: int = 3, down_stride: int = 2, down_pad: int = 1, num_classes: int = 1000, drop_rate: float = 0., drop_path_rate: float = 0., use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, global_pool: str = 'avg', output_stride: int = 32, in_chans: int = 3, **kwargs, ): super().__init__() assert output_stride == 32 self.num_classes = num_classes self.global_pool = global_pool self.feature_info = [] self.stem = nn.Sequential( nn.Conv2d(in_chans, embed_dims[0] // 2, 3, 2, 1), nn.BatchNorm2d(embed_dims[0] // 2), nn.ReLU(), nn.Conv2d(embed_dims[0] // 2, embed_dims[0], 3, 2, 1), 
nn.BatchNorm2d(embed_dims[0]), nn.ReLU(), ) prev_dim = embed_dims[0] stages = [] for i in range(len(layers)): downsample = Embedding( in_chans=prev_dim, embed_dim=embed_dims[i], patch_size=down_patch_size, stride=down_stride, padding=down_pad, ) if downsamples[i] else nn.Identity() stage = Stage( dim=embed_dims[i], index=i, layers=layers, mlp_ratio=mlp_ratios, act_layer=act_layer, drop_rate=drop_rate, drop_path_rate=drop_path_rate, use_layer_scale=use_layer_scale, layer_scale_init_value=layer_scale_init_value, downsample=downsample, ) prev_dim = embed_dims[i] stages.append(stage) self.feature_info += [dict(num_chs=embed_dims[i], reduction=2**(i+2), module=f'stages.{i}')] self.stages = nn.Sequential(*stages) # Classifier head self.num_features = self.head_hidden_size = out_chs = embed_dims[-1] self.norm = nn.BatchNorm2d(out_chs) self.head_drop = nn.Dropout(drop_rate) self.head = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() # assuming model is always distilled (valid for current checkpoints, will split def if that changes) self.head_dist = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() self.distilled_training = False # must set this True to train w/ distillation token self._initialize_weights() def _initialize_weights(self): for name, m in self.named_modules(): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self) -> Set: return set() @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: matcher = dict( stem=r'^stem', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm', (99999,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> Tuple[nn.Module, nn.Module]: return self.head, self.head_dist def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable: bool = True): self.distilled_training = enable def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) last_idx = len(self.stages) - 1 # forward pass x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: if norm and feat_idx == last_idx: x_inter = self.norm(x) # applying final norm last intermediate else: x_inter = x intermediates.append(x_inter) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=(2, 3)) x = self.head_drop(x) if pre_logits: return x x, x_dist = self.head(x), self.head_dist(x) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train/finetune, inference average the classifier predictions return (x + x_dist) / 2 def forward(self, x: torch.Tensor): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict: Dict[str, torch.Tensor], model: nn.Module) -> Dict[str, torch.Tensor]: state_dict = state_dict.get('model', state_dict) if 'stem.0.weight' in state_dict: return state_dict out_dict = {} for k, v in state_dict.items(): k = k.replace('patch_embed.', 'stem.') k = k.replace('dist_head.', 'head_dist.') k = k.replace('attn.Proj.', 'attn.proj.') k = k.replace('.layer_scale_1', '.layer_scale_1.gamma') k = k.replace('.layer_scale_2', '.layer_scale_2.gamma') k = re.sub(r'\.layer_scale(?=$|\.)', '.layer_scale.gamma', k) m = re.match(r'^network\.(\d+)\.(.*)', k) if m: n_idx, rest = int(m.group(1)), m.group(2) stage_idx = n_idx // 2 if n_idx % 2 == 0: k = f'stages.{stage_idx}.blocks.{rest}' else: k = f'stages.{stage_idx+1}.downsample.{rest}' out_dict[k] = v return out_dict def _cfg(url: str = '', **kwargs: Any) -> Dict[str, Any]: return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': .95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': ('head', 'head_dist'), 'paper_ids': 'arXiv:2303.15446', 'paper_name': 'SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications', 'origin_url': 'https://github.com/Amshaker/SwiftFormer', **kwargs } default_cfgs = generate_default_cfgs({ 'swiftformer_xs.dist_in1k': _cfg( hf_hub_id='timm/', ), 'swiftformer_s.dist_in1k': _cfg( hf_hub_id='timm/' ), 'swiftformer_l1.dist_in1k': _cfg( hf_hub_id='timm/' ), 'swiftformer_l3.dist_in1k': _cfg( hf_hub_id='timm/' ), }) def _create_swiftformer(variant: str, pretrained: bool = False, **kwargs: Any) -> SwiftFormer: model = 
build_model_with_cfg( SwiftFormer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs, ) return model @register_model def swiftformer_xs(pretrained: bool = False, **kwargs: Any) -> SwiftFormer: model_args = dict(layers=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]) return _create_swiftformer('swiftformer_xs', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swiftformer_s(pretrained: bool = False, **kwargs: Any) -> SwiftFormer: model_args = dict(layers=[3, 3, 9, 6], embed_dims=[48, 64, 168, 224]) return _create_swiftformer('swiftformer_s', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swiftformer_l1(pretrained: bool = False, **kwargs: Any) -> SwiftFormer: model_args = dict(layers=[4, 3, 10, 5], embed_dims=[48, 96, 192, 384]) return _create_swiftformer('swiftformer_l1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swiftformer_l3(pretrained: bool = False, **kwargs: Any) -> SwiftFormer: model_args = dict(layers=[4, 4, 12, 6], embed_dims=[64, 128, 320, 512]) return _create_swiftformer('swiftformer_l3', pretrained=pretrained, **dict(model_args, **kwargs))
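A short sketch of the dual classifier head behaviour implemented above: in eval mode the `head` and `head_dist` predictions are averaged into a single tensor, while distilled training returns them separately. The variant name and input size are assumptions for illustration.

```python
import torch
import timm

model = timm.create_model("swiftformer_xs", pretrained=False)
x = torch.randn(2, 3, 224, 224)

# Inference / standard fine-tuning: the two classifier outputs are averaged.
model.eval()
with torch.no_grad():
    out = model(x)
print(out.shape)  # e.g. torch.Size([2, 1000])

# Distillation-style training: enable distilled mode to get separate (cls, dist) predictions.
model.train()
model.set_distilled_training(True)
cls_out, dist_out = model(x)
print(cls_out.shape, dist_out.shape)  # e.g. torch.Size([2, 1000]) torch.Size([2, 1000])
```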
pytorch-image-models/timm/models/swiftformer.py/0
{ "file_path": "pytorch-image-models/timm/models/swiftformer.py", "repo_id": "pytorch-image-models", "token_count": 10987 }
238
""" VoVNet (V1 & V2) Papers: * `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 * `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 Looked at https://github.com/youngwanLEE/vovnet-detectron2 & https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py for some reference, rewrote most of the code. Hacked together by / Copyright 2020 Ross Wightman """ from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \ create_attn, create_norm_act_layer from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['VovNet'] # model_registry will add each entrypoint fn to this class SequentialAppendList(nn.Sequential): def __init__(self, *args): super(SequentialAppendList, self).__init__(*args) def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: for i, module in enumerate(self): if i == 0: concat_list.append(module(x)) else: concat_list.append(module(concat_list[-1])) x = torch.cat(concat_list, dim=1) return x class OsaBlock(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None, ): super(OsaBlock, self).__init__() self.residual = residual self.depthwise = depthwise conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) next_in_chs = in_chs if self.depthwise and next_in_chs != mid_chs: assert not residual self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs) else: self.conv_reduction = None mid_convs = [] for i in range(layer_per_block): if self.depthwise: conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs) else: conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs) next_in_chs = mid_chs mid_convs.append(conv) self.conv_mid = SequentialAppendList(*mid_convs) # feature aggregation next_in_chs = in_chs + layer_per_block * mid_chs self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs) self.attn = create_attn(attn, out_chs) if attn else None self.drop_path = drop_path def forward(self, x): output = [x] if self.conv_reduction is not None: x = self.conv_reduction(x) x = self.conv_mid(x, output) x = self.conv_concat(x) if self.attn is not None: x = self.attn(x) if self.drop_path is not None: x = self.drop_path(x) if self.residual: x = x + output[0] return x class OsaStage(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rates=None, ): super(OsaStage, self).__init__() self.grad_checkpointing = False if downsample: self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) else: self.pool = None blocks = [] for i in range(block_per_stage): last_block = i == block_per_stage - 1 if drop_path_rates is not None and drop_path_rates[i] > 0.: drop_path = DropPath(drop_path_rates[i]) else: drop_path = None blocks += [OsaBlock( in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path )] 
in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.pool is not None: x = self.pool(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class VovNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., **kwargs, ): """ Args: cfg (dict): Model architecture configuration in_chans (int): Number of input channels (default: 3) num_classes (int): Number of classifier classes (default: 1000) global_pool (str): Global pooling type (default: 'avg') output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) norm_layer (Union[str, nn.Module]): normalization layer act_layer (Union[str, nn.Module]): activation layer drop_rate (float): Dropout rate (default: 0.) drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) kwargs (dict): Extra kwargs overlayed onto cfg """ super(VovNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride == 32 # FIXME support dilation cfg = dict(cfg, **kwargs) stem_stride = cfg.get("stem_stride", 4) stem_chs = cfg["stem_chs"] stage_conv_chs = cfg["stage_conv_chs"] stage_out_chs = cfg["stage_out_chs"] block_per_stage = cfg["block_per_stage"] layer_per_block = cfg["layer_per_block"] conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) # Stem module last_stem_stride = stem_stride // 2 conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct self.stem = nn.Sequential(*[ ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), ]) self.feature_info = [dict( num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] current_stride = stem_stride # OSA stages stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) stages = [] for i in range(4): # num_stages downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 stages += [OsaStage( in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args, )] self.num_features = stage_out_chs[i] current_stride *= 2 if downsample else 1 self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.head_hidden_size = self.num_features self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.Linear): nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes, global_pool: Optional[str] 
= None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(5, indices) # forward pass feat_idx = 0 x = self.stem[:-1](x) if feat_idx in take_indices: intermediates.append(x) x = self.stem[-1](x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index] for feat_idx, stage in enumerate(stages, start=1): x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(5, indices) self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) return self.stages(x) def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x # model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & # https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py model_cfgs = dict( vovnet39a=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=False, depthwise=False, attn='', ), vovnet57a=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=False, depthwise=False, attn='', ), ese_vovnet19b_slim_dw=dict( stem_chs=[64, 64, 64], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese', ), ese_vovnet19b_dw=dict( stem_chs=[64, 64, 64], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese', ), ese_vovnet19b_slim=dict( stem_chs=[64, 64, 128], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese', ), ese_vovnet19b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese', ), ese_vovnet39b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 
224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='ese', ), ese_vovnet57b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=True, depthwise=False, attn='ese', ), ese_vovnet99b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 3, 9, 3], residual=True, depthwise=False, attn='ese', ), eca_vovnet39b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='eca', ), ) model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] def _create_vovnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( VovNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs, } default_cfgs = generate_default_cfgs({ 'vovnet39a.untrained': _cfg(url=''), 'vovnet57a.untrained': _cfg(url=''), 'ese_vovnet19b_slim_dw.untrained': _cfg(url=''), 'ese_vovnet19b_dw.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet19b_slim.untrained': _cfg(url=''), 'ese_vovnet39b.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet57b.ra4_e3600_r256_in1k': _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 320, 320), test_crop_pct=1.0 ), 'ese_vovnet99b.untrained': _cfg(url=''), 'eca_vovnet39b.untrained': _cfg(url=''), 'ese_vovnet39b_evos.untrained': _cfg(url=''), }) @register_model def vovnet39a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) @register_model def vovnet57a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) @register_model def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) @register_model def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) # Experimental Models @register_model def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet: def norm_act_fn(num_features, 
**nkwargs): return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs) return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
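As an illustration of the `feature_info` metadata built above, here is a minimal sketch of using a VoVNet as a feature backbone. The variant, `out_indices`, and the example channel/reduction values in the comments are assumptions, not part of vovnet.py.

```python
import torch
import timm

# Wrap the model with timm's feature extraction hooks; skip the stride-2 stem feature.
backbone = timm.create_model(
    "ese_vovnet19b_dw",
    pretrained=False,
    features_only=True,
    out_indices=(1, 2, 3, 4),
)

x = torch.randn(1, 3, 224, 224)
features = backbone(x)

print(backbone.feature_info.channels())   # e.g. [256, 512, 768, 1024] for this variant
print(backbone.feature_info.reduction())  # e.g. [4, 8, 16, 32]
for feat in features:
    print(tuple(feat.shape))              # NCHW feature maps, one per selected stage
```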
pytorch-image-models/timm/models/vovnet.py/0
{ "file_path": "pytorch-image-models/timm/models/vovnet.py", "repo_id": "pytorch-image-models", "token_count": 9084 }
239
""" PyTorch Implementation of the Kron (PSGD) optimizer This is a PSGD optimizer using a Kronecker-factored preconditioner. This impl was adapted from https://github.com/evanatyourservice/kron_torch by Evan Walters, licensed CC-BY-4.0. Contributions to above also made by * Lucas Nestler, added to his https://github.com/ClashLuke/HeavyBall implementation. * Omead Pooladzandi https://github.com/opooladz The above work drew from https://github.com/lixilinx/psgd_torch by Xi-Lin Li References for added functionality: Cautious Optimizers: https://arxiv.org/abs/2411.16085 Why Gradients Rapidly Increase Near the End of Training: https://arxiv.org/abs/2506.02285 This `timm` impl * works with a wider variety of torch versions * fixes some checkpoint save/restore (resume issues) * adds decoupled weight-decay option * has some refactoring, cleanup of args, default/group items * warning about not having opt_einsum (unusable without) """ import logging import string import random import warnings from typing import Any, Callable, Dict, Optional, Tuple, Union import numpy as np import torch try: # NOTE opt_einsum needed to avoid blowing up memory with einsum ops import opt_einsum import torch.backends.opt_einsum torch.backends.opt_einsum.enabled = True torch.backends.opt_einsum.strategy = "auto-hq" has_opt_einsum = True except ImportError: has_opt_einsum = False try: torch._dynamo.config.cache_size_limit = 1_000_000 has_dynamo = True except AttributeError: has_dynamo = False from ._types import ParamsT _logger = logging.getLogger(__name__) def precond_update_prob_schedule( n: float, max_prob: float = 1.0, min_prob: float = 0.03, decay: float = 0.001, flat_start: float = 500, ) -> torch.Tensor: """Anneal preconditioner update probability during beginning of training. PSGD benefits from more preconditioner updates at the beginning of training, but once the preconditioner is learned the update probability can drop low. This schedule is an exponential anneal with a flat start. Default settings keep update probability at 1.0 for 200 steps then exponentially anneal down to `min_prob` by 4000 steps. Default settings work very well for most models and training regimes. """ """Exponential anneal with flat start.""" n = torch.tensor(n, dtype=torch.float32) prob = max_prob * torch.exp(-decay * (n - flat_start)) prob.clamp_(min=min_prob, max=max_prob) return prob class Kron(torch.optim.Optimizer): """Implements PSGD Kron from https://github.com/lixilinx/psgd_torch. Args: params: Iterable of parameters to optimize or dicts defining parameter groups. lr: Learning rate. momentum: Momentum parameter. weight_decay: Weight decay. preconditioner_update_probability: Probability of updating the preconditioner. If None, defaults to a schedule that anneals from 1.0 to 0.03 by 4000 steps. max_size_triangular: Max size for dim's preconditioner to be triangular. min_ndim_triangular: Minimum number of dimensions a layer needs to have triangular preconditioners. memory_save_mode: 'one_diag', 'smart_one_diag', or 'all_diag', None is default to set all preconditioners to be triangular, 'one_diag' sets the largest or last dim to be diagonal per layer, and 'all_diag' sets all preconditioners to be diagonal. momentum_into_precond_update: whether to send momentum into preconditioner update instead of raw gradients. mu_dtype: Dtype of the momentum accumulator. precond_dtype: Dtype of the preconditioner. 
decoupled_decay: AdamW style decoupled weight decay corrected_weight_decay: apply corrected weight decay when using decoupled_decay (lr**2 / max_lr) flatten: Flatten dimensions instead of fully relying on expressions for higher rank params flatten_start_dim: Start of flatten range, defaults to 2. Seems good tradeoff for ConvNets. flatten_end_dim: End of flatten range, defaults to -1. stochastic_weight_decay: Enable random modulation of weight decay deterministic: Deterministic behaviour across save / load (resume). FIXME slow, needs work """ def __init__( self, params: ParamsT, lr: float = 0.001, momentum: float = 0.9, weight_decay: float = 0.0, preconditioner_update_probability: Optional[Union[Callable, float]] = None, max_size_triangular: int = 2048, min_ndim_triangular: int = 2, memory_save_mode: Optional[str] = None, momentum_into_precond_update: bool = True, precond_lr: float = 0.1, precond_init_scale: float = 1.0, mu_dtype: Optional[torch.dtype] = None, precond_dtype: Optional[torch.dtype] = None, decoupled_decay: bool = False, corrected_weight_decay: bool = False, flatten: bool = False, flatten_start_dim: int = 2, flatten_end_dim: int = -1, stochastic_weight_decay: bool = False, deterministic: bool = False, ): if not has_opt_einsum: warnings.warn("It is highly recommended to have 'opt_einsum' installed for this optimizer.") if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr}") if not 0.0 <= momentum < 1.0: raise ValueError(f"Invalid beta parameter: {momentum}") if not 0.0 <= weight_decay: raise ValueError(f"Invalid weight_decay value: {weight_decay}") defaults = dict( lr=lr, momentum=momentum, weight_decay=weight_decay, preconditioner_update_probability=preconditioner_update_probability, max_size_triangular=max_size_triangular, min_ndim_triangular=min_ndim_triangular, memory_save_mode=memory_save_mode, momentum_into_precond_update=momentum_into_precond_update, precond_lr=precond_lr, precond_init_scale=precond_init_scale, mu_dtype=mu_dtype, precond_dtype=precond_dtype, decoupled_decay=decoupled_decay, corrected_weight_decay=corrected_weight_decay, flatten=flatten, flatten_start_dim=flatten_start_dim, flatten_end_dim=flatten_end_dim, stochastic_weight_decay=stochastic_weight_decay, ) super(Kron, self).__init__(params, defaults) self._param_exprs = {} # cache for einsum expr self._tiny = torch.finfo(torch.bfloat16).tiny self.rng = random.Random(1337) self.deterministic = deterministic # make compile optional (for bwd compat) if has_dynamo: self._calc_A_and_conjB = torch.compile(_calc_A_and_conjB, fullgraph=True, dynamic=False) self._q_terms = torch.compile(_q_terms, fullgraph=True, dynamic=False) self._precond_grad = torch.compile(_precond_grad, fullgraph=True, dynamic=False) self._balance_Q = torch.compile(_balance_Q, fullgraph=True, dynamic=False) else: self._calc_A_and_conjB = _calc_A_and_conjB self._q_terms = _q_terms self._precond_grad = _precond_grad self._balance_Q = _balance_Q def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('corrected_weight_decay', False) def __getstate__(self): _dict = super().__getstate__() _dict["rng"] = self.rng return _dict def state_dict(self) -> Dict[str, Any]: # Get the optimizer's state dict optimizer_state = super().state_dict() # Add the generator state optimizer_state['rng_state'] = self.rng.getstate() return optimizer_state def load_state_dict(self, state_dict: Dict[str, Any]) -> None: # Extract and remove the RNG state from the state dict rng_states = {} if 'rng_state' in 
state_dict: rng_states['rng_state'] = state_dict.pop('rng_state') # Load the optimizer state super().load_state_dict(state_dict) state_dict.update(rng_states) # add back # Restore the RNG state if it exists if 'rng_state' in rng_states: self.rng.setstate(rng_states['rng_state']) def __setstate__(self, state): super().__setstate__(state) self._param_exprs = {} @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() total_momentum_size = 0 total_momentum_mb = 0 total_precond_size = 0 total_precond_mb = 0 for group in self.param_groups: mu_dtype = group.get("mu_dtype") precond_dtype = group.get("precond_dtype", torch.float32) momentum_into_precond_update = group.get("momentum_into_precond_update", True) update_prob = group.get("preconditioner_update_probability", None) for p in group["params"]: if p.grad is None: continue grad = p.grad state = self.state[p] flattened = False if group['flatten']: grad = safe_flatten(grad, group["flatten_start_dim"], group["flatten_end_dim"]) flattened = True if len(state) == 0: state["step"] = 0 state["update_counter"] = 0 state["momentum_buffer"] = torch.zeros_like(grad, dtype=mu_dtype or grad.dtype) # init Q and einsum expressions on first step state["Q"], exprs = _init_Q_exprs( grad, group["precond_init_scale"], group["max_size_triangular"], group["min_ndim_triangular"], group["memory_save_mode"], dtype=precond_dtype, ) self._param_exprs[p] = exprs # Accumulate sizes for log momentum_size = state["momentum_buffer"].numel() momentum_mb = momentum_size * state["momentum_buffer"].element_size() / 2**20 total_momentum_size += momentum_size total_momentum_mb += momentum_mb precond_size = sum(q.numel() for q in state["Q"]) precond_mb = sum(q.numel() * q.element_size() for q in state["Q"]) / 2**20 total_precond_size += precond_size total_precond_mb += precond_mb elif p not in self._param_exprs: # init only the einsum expressions, called after state load, Q are loaded from state_dict exprs = _init_Q_exprs( grad, group["precond_init_scale"], group["max_size_triangular"], group["min_ndim_triangular"], group["memory_save_mode"], dtype=precond_dtype, init_q=False, ) self._param_exprs[p] = exprs else: # retrieve cached expressions exprs = self._param_exprs[p] # update preconditioners all together deterministically if update_prob is None: update_prob = precond_update_prob_schedule if callable(update_prob): update_prob = update_prob(state["step"]) state["update_counter"] += 1 do_update = state["update_counter"] >= 1 / update_prob if do_update: state["update_counter"] = 0 state["step"] += 1 # Update momentum buffer beta = group["momentum"] bias_correction = 1 - beta ** state["step"] momentum_buffer = state["momentum_buffer"] momentum_buffer.mul_(group["momentum"]).add_(grad, alpha=1 - group["momentum"]) # Restore momentum dtype if mu_dtype is not None: momentum_buffer.copy_(momentum_buffer.to(dtype=mu_dtype)) debiased_momentum = (momentum_buffer / bias_correction).to(dtype=precond_dtype) # Balance preconditioners roughly every 100 updates balance = self.rng.random() < 0.01 and do_update if grad.dim() > 1 and balance: self._balance_Q(state["Q"]) # Update preconditioner if do_update: exprA, exprGs, _ = exprs Q = state["Q"] if self.deterministic: torch_rng = torch.Generator(device=debiased_momentum.device) torch_rng.manual_seed(self.rng.randint(0, 2 ** 31)) else: torch_rng = None V = torch.randn( debiased_momentum.shape, generator=torch_rng, dtype=precond_dtype, device=debiased_momentum.device, ) G = 
debiased_momentum if momentum_into_precond_update else grad A, conjB = self._calc_A_and_conjB(exprA, G, Q, V) terms = self._q_terms(exprGs, A, conjB) for q, (term1, term2) in zip(Q, terms): tmp = term1 - term2 tmp *= group["precond_lr"] if q.dim() < 2: tmp *= q tmp /= (term1 + term2).norm(float("inf")) + self._tiny else: tmp = torch.triu(tmp) tmp /= _norm_lower_bound(term1 + term2) + self._tiny tmp @= q q.sub_(tmp) # Precondition gradients pre_grad = self._precond_grad( state["Q"], exprs, debiased_momentum, ).to(dtype=p.dtype) # RMS of pre_grad should be 1.0, so let's cap at 1.1 pre_grad.mul_(torch.clamp(1.1 / (pre_grad.square().mean().sqrt_() + 1e-8), max=1.0)) if flattened: pre_grad = pre_grad.view(p.shape) # Apply weight decay weight_decay = group["weight_decay"] if weight_decay != 0: if group["stochastic_weight_decay"]: weight_decay = 2 * self.rng.random() * weight_decay if group["decoupled_decay"]: if group['corrected_weight_decay']: wd_scale = group["lr"] ** 2 / self.defaults['lr'] else: wd_scale = group["lr"] p.mul_(1. - wd_scale * weight_decay) else: pre_grad.add_(p, alpha=weight_decay) # Update parameters p.add_(pre_grad, alpha=-group["lr"]) if total_momentum_size > 0: _logger.info(f"PSGD Momentum buffer size: {total_momentum_size} elements, {total_momentum_mb:.2f} MB") _logger.info(f"PSGD Preconditioners size: {total_precond_size} elements, {total_precond_mb:.2f} MB") return loss def safe_flatten(tensor, start_dim=0, end_dim=-1): ndim = tensor.ndim # Convert negative end_dim to positive and clip to end end_dim = min(end_dim if end_dim >= 0 else ndim + end_dim, ndim - 1) # If tensor has fewer dims than start_dim or start > end, return tensor as is if ndim <= start_dim or start_dim > end_dim: return tensor # Now safe to flatten return tensor.flatten(start_dim, end_dim) def _init_Q_exprs( t, scale, max_size, min_ndim_triangular, memory_save_mode, dtype=None, init_q=True, ): """For a scalar or tensor t, we initialize its preconditioner Q and reusable einsum expressions for updating Q and preconditioning gradient. 
""" letters = string.ascii_lowercase + string.ascii_uppercase dtype = dtype if dtype is not None else t.dtype shape = t.shape Q = [] if len(shape) == 0: # scalar if init_q: Q.append(scale * torch.ones_like(t, dtype=dtype)) exprA = ",->" exprGs = [",->"] exprP = ",,->" else: # tensor if len(shape) > 13: raise ValueError(f"Got tensor with dim {len(t.shape)}; Einstein runs out of letters!") scale = scale ** (1 / len(shape)) if memory_save_mode is None: dim_diag = [False for _ in shape] elif memory_save_mode == "one_diag": rev_sorted_dims = np.argsort(shape)[::-1] dim_diag = [False for _ in shape] dim_diag[rev_sorted_dims[0]] = True elif memory_save_mode == "smart_one_diag": # addition proposed by Lucas Nestler rev_sorted_dims = np.argsort(shape)[::-1] sorted_shape = sorted(shape) dim_diag = [False for _ in shape] if len(shape) >= 2 and sorted_shape[-1] > sorted_shape[-2]: dim_diag[rev_sorted_dims[0]] = True elif memory_save_mode == "all_diag": dim_diag = [True for _ in shape] else: raise ValueError( f"Invalid memory_save_mode: {memory_save_mode}, must be one of [None, 'one_diag', 'all_diag']") piece1A, piece2A, piece3A = ([], "", "") exprGs = [] piece1P, piece2P, piece3P, piece4P = ([], [], "", "") for i, (size, dim_d) in enumerate(zip(shape, dim_diag)): if ( size == 1 or size > max_size or len(shape) < min_ndim_triangular or dim_d ): # use diagonal matrix as preconditioner for this dim if init_q: Q.append(scale * torch.ones(size, dtype=dtype, device=t.device)) piece1A.append(letters[i]) piece2A = piece2A + letters[i] piece3A = piece3A + letters[i] piece1 = "".join([letters[i + 13] if j == i else letters[j] for j in range(len(shape))]) subscripts = piece1 + "," + piece1 + "->" + letters[i + 13] exprGs.append(subscripts) piece1P.append(letters[i + 13]) piece2P.append(letters[i + 13]) piece3P = piece3P + letters[i + 13] piece4P = piece4P + letters[i + 13] else: # use triangular matrix as preconditioner for this dim if init_q: Q.append(scale * torch.eye(size, dtype=dtype, device=t.device)) piece1A.append(letters[i] + letters[i + 13]) piece2A = piece2A + letters[i + 13] piece3A = piece3A + letters[i] piece1 = "".join([letters[i + 13] if j == i else letters[j] for j in range(len(shape))]) piece2 = "".join([letters[i + 26] if j == i else letters[j] for j in range(len(shape))]) subscripts = piece1 + "," + piece2 + "->" + letters[i + 13] + letters[i + 26] exprGs.append(subscripts) a, b, c = (letters[i], letters[i + 13], letters[i + 26]) piece1P.append(a + b) piece2P.append(a + c) piece3P = piece3P + c piece4P = piece4P + b exprA = ",".join(piece1A) + "," + piece2A + "->" + piece3A exprP = ",".join(piece1P) + "," + ",".join(piece2P) + "," + piece3P + "->" + piece4P exprGs = tuple(exprGs) if init_q: return [Q, (exprA, exprGs, exprP)] else: return exprA, exprGs, exprP def _lb(A, max_abs): A = A / max_abs aa = torch.real(A * A.conj()) value0, i = torch.max(torch.sum(aa, dim=0), 0) value1, j = torch.max(torch.sum(aa, dim=1), 0) if value0 > value1: x = A[:, i].conj() @ A return max_abs * torch.linalg.vector_norm((x / torch.linalg.vector_norm(x)) @ A.H) else: x = A @ A[j].conj() return max_abs * torch.linalg.vector_norm(A.H @ (x / torch.linalg.vector_norm(x))) def _norm_lower_bound(A): """Cheap lower bound for the spectral norm of A.""" max_abs = A.norm(float("inf")) return torch.where(max_abs > 0, _lb(A, max_abs), max_abs) def _solve_triangular_right(X, A): """X @ inv(A)""" orig_dtype = X.dtype X = X.to(dtype=torch.float32) A = A.to(dtype=torch.float32) out = torch.linalg.solve_triangular(A, X.reshape(-1, 
X.size(-1)), upper=True, left=False).reshape_as(X) return out.to(dtype=orig_dtype) def _balance_Q(Q_in): norms = torch.stack([q.norm(float("inf")) for q in Q_in]) geometric_mean = norms.prod() ** (1 / len(Q_in)) norms = geometric_mean / norms for i, q in enumerate(Q_in): q.mul_(norms[i]) def _precond_grad(Q, exprs, G): """Precondition gradient G with preconditioner Q.""" return torch.einsum(exprs[-1], *[q.conj() for q in Q], *Q, G) def _calc_A_and_conjB(exprA, G, Q, V): A = torch.einsum(exprA, *Q, G) order = G.dim() p = tuple(range(order)) conjB = torch.permute(V.conj(), p[1:] + p[:1]) for i, q in enumerate(Q): conjB = conjB / q if q.dim() < 2 else _solve_triangular_right(conjB, q) if i < order - 1: conjB = torch.transpose(conjB, i, order - 1) return A, conjB def _q_terms(exprGs, A, conjB): terms = [] for exprG in exprGs: term1 = torch.einsum(exprG, A, A.conj()) term2 = torch.einsum(exprG, conjB.conj(), conjB) terms.append((term1, term2)) return terms
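A minimal usage sketch of the optimizer above on a toy model. The model, learning rate, and other hyper-parameters are placeholder assumptions rather than recommendations, and `opt_einsum` should be installed for anything beyond toy sizes, as the header notes.

```python
import torch
import torch.nn as nn

from timm.optim.kron import Kron

# Toy model and data, for illustration only.
model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10))
opt = Kron(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4, decoupled_decay=True)

for step in range(10):
    x = torch.randn(16, 32)
    y = torch.randint(0, 10, (16,))
    loss = nn.functional.cross_entropy(model(x), y)
    opt.zero_grad()
    loss.backward()
    opt.step()  # preconditioner updates follow the default anneal schedule unless overridden
```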
pytorch-image-models/timm/optim/kron.py/0
{ "file_path": "pytorch-image-models/timm/optim/kron.py", "repo_id": "pytorch-image-models", "token_count": 11053 }
240
""" Batch size decay and retry helpers. Copyright 2022 Ross Wightman """ import math def decay_batch_step(batch_size, num_intra_steps=2, no_odd=False): """ power of two batch-size decay with intra steps Decay by stepping between powers of 2: * determine power-of-2 floor of current batch size (base batch size) * divide above value by num_intra_steps to determine step size * floor batch_size to nearest multiple of step_size (from base batch size) Examples: num_steps == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1 num_steps (no_odd=True) == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 6, 4, 2 num_steps == 2 --> 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1 num_steps == 1 --> 64, 32, 16, 8, 4, 2, 1 """ if batch_size <= 1: # return 0 for stopping value so easy to use in loop return 0 base_batch_size = int(2 ** (math.log(batch_size - 1) // math.log(2))) step_size = max(base_batch_size // num_intra_steps, 1) batch_size = base_batch_size + ((batch_size - base_batch_size - 1) // step_size) * step_size if no_odd and batch_size % 2: batch_size -= 1 return batch_size def check_batch_size_retry(error_str): """ check failure error string for conditions where batch decay retry should not be attempted """ error_str = error_str.lower() if 'required rank' in error_str: # Errors involving phrase 'required rank' typically happen when a conv is used that's # not compatible with channels_last memory format. return False if 'illegal' in error_str: # 'Illegal memory access' errors in CUDA typically leave process in unusable state return False return True
pytorch-image-models/timm/utils/decay_batch.py/0
{ "file_path": "pytorch-image-models/timm/utils/decay_batch.py", "repo_id": "pytorch-image-models", "token_count": 656 }
241
# How do multi-step agents work?

The ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) is currently the main approach to building agents.

The name is based on the concatenation of two words, "Reason" and "Act." Indeed, agents following this architecture solve their task in as many steps as needed, each step consisting of a Reasoning step followed by an Action step in which the agent formulates tool calls that bring it closer to solving the task at hand.

All agents in `smolagents` are based on the single `MultiStepAgent` class, which is an abstraction of the ReAct framework. On a basic level, this class performs actions in a cycle of steps, where existing variables and knowledge are incorporated into the agent logs as follows:

Initialization: the system prompt is stored in a `SystemPromptStep`, and the user query is logged into a `TaskStep`.

While loop (ReAct loop):

- Use `agent.write_memory_to_messages()` to write the agent logs into a list of LLM-readable [chat messages](https://huggingface.co/docs/transformers/en/chat_templating).
- Send these messages to a `Model` object to get its completion. Parse the completion to get the action (a JSON blob for `ToolCallingAgent`, a code snippet for `CodeAgent`).
- Execute the action and log the result into memory (an `ActionStep`).
- At the end of each step, we run all callback functions defined in `agent.step_callbacks`.

Optionally, when planning is activated, a plan can be periodically revised and stored in a `PlanningStep`. This includes feeding facts about the task at hand to the memory.

For a `CodeAgent`, it looks like the figure below.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/codeagent_docs.png" />
</div>

Here is a video overview of how that works:

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" />
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" />
</div>

We implement two versions of agents:

- [`CodeAgent`] generates its tool calls as Python code snippets.
- [`ToolCallingAgent`] writes its tool calls as JSON, as is common in many frameworks.

Depending on your needs, either approach can be used. For instance, web browsing often requires waiting after each page interaction, so JSON tool calls can fit well.

> [!TIP]
> Read the [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about multi-step agents.
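To make the loop above concrete, here is a self-contained toy sketch of the same Reason/Act cycle. It is illustrative only: the helper functions and the `Step` record are simplified stand-ins, not the actual `MultiStepAgent` internals, and `write_memory_to_messages` here merely mimics the real method of the same name.

```python
from dataclasses import dataclass


@dataclass
class Step:
    kind: str      # "task" or "action"
    content: str


def write_memory_to_messages(memory):
    # Turn the agent's log into a list of LLM-readable chat messages.
    roles = {"task": "user", "action": "assistant"}
    return [{"role": roles[step.kind], "content": step.content} for step in memory]


def fake_model(messages):
    # Stand-in for a real Model object: first calls a calculator tool, then gives a final answer.
    return 'calculator("2 + 2")' if len(messages) < 2 else "final_answer(4)"


def execute(action):
    # Stand-in tool executor for the single toy tool.
    if action.startswith("calculator"):
        return eval(action.split('"')[1])  # fine for a toy; never eval untrusted output for real
    return action


memory = [Step("task", "What is 2 + 2?")]
for _ in range(5):                                              # the ReAct loop, with a step budget
    completion = fake_model(write_memory_to_messages(memory))   # Reason: ask the LLM what to do next
    observation = execute(completion)                           # Act: run the tool call
    memory.append(Step("action", f"{completion} -> {observation}"))  # log an ActionStep-like record
    if completion.startswith("final_answer"):
        break

print(memory[-1].content)
```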
smolagents/docs/source/en/conceptual_guides/react.md/0
{ "file_path": "smolagents/docs/source/en/conceptual_guides/react.md", "repo_id": "smolagents", "token_count": 807 }
242
# Inspecting runs with OpenTelemetry [[open-in-colab]] > [!TIP] > If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour). ## Why log your agent runs? Agent runs are complicated to debug. Validating that a run went properly is hard, since agent workflows are [unpredictable by design](../conceptual_guides/intro_agents) (if they were predictable, you'd just be using good old code). And inspecting a run is hard as well: multi-step agents tend to quickly fill a console with logs, and most of the errors are just "LLM dumb" kind of errors, from which the LLM auto-corrects in the next step by writing better code or tool calls. So using instrumentation to record agent runs is necessary in production for later inspection and monitoring! We've adopted the [OpenTelemetry](https://opentelemetry.io/) standard for instrumenting agent runs. This means that you can just run some instrumentation code, then run your agents normally, and everything gets logged into your platform. Below are some examples of how to do this with different OpenTelemetry backends. Here's how it then looks like on the platform: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.gif"/> </div> ## Setting up telemetry with Arize AI Phoenix First install the required packages. Here we install [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) because that's a good solution to collect and inspect the logs, but there are other OpenTelemetry-compatible platforms that you could use for this collection & inspection part. ```shell pip install 'smolagents[telemetry,toolkit]' ``` Then run the collector in the background. ```shell python -m phoenix.server.main serve ``` Finally, set up `SmolagentsInstrumentor` to trace your agents and send the traces to Phoenix default endpoint. ```python from phoenix.otel import register from openinference.instrumentation.smolagents import SmolagentsInstrumentor register() SmolagentsInstrumentor().instrument() ``` Then you can run your agents! ```py from smolagents import ( CodeAgent, ToolCallingAgent, WebSearchTool, VisitWebpageTool, InferenceClientModel, ) model = InferenceClientModel() search_agent = ToolCallingAgent( tools=[WebSearchTool(), VisitWebpageTool()], model=model, name="search_agent", description="This is an agent that can do web search.", ) manager_agent = CodeAgent( tools=[], model=model, managed_agents=[search_agent], ) manager_agent.run( "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?" ) ``` Voilà! You can then navigate to `http://0.0.0.0:6006/projects/` to inspect your run! <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.png"> You can see that the CodeAgent called its managed ToolCallingAgent (by the way, the managed agent could have been a CodeAgent as well) to ask it to run the web search for the U.S. 2024 growth rate. Then the managed agent returned its report and the manager agent acted upon it to calculate the economy doubling time! Sweet, isn't it? ## Setting up telemetry with 🪢 Langfuse This part shows how to monitor and debug your Hugging Face **smolagents** with **Langfuse** using the `SmolagentsInstrumentor`. > **What is Langfuse?** [Langfuse](https://langfuse.com) is an open-source platform for LLM engineering. 
It provides tracing and monitoring capabilities for AI agents, helping developers debug, analyze, and optimize their products. Langfuse integrates with various tools and frameworks via native integrations, OpenTelemetry, and SDKs. ### Step 1: Install Dependencies ```python %pip install langfuse 'smolagents[telemetry]' openinference-instrumentation-smolagents ``` ### Step 2: Set Up Environment Variables Set your Langfuse API keys and configure the OpenTelemetry endpoint to send traces to Langfuse. Get your Langfuse API keys by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-hosting Langfuse](https://langfuse.com/self-hosting). Also, add your [Hugging Face token](https://huggingface.co/settings/tokens) (`HF_TOKEN`) as an environment variable. ```python import os # Get keys for your project from the project settings page: https://cloud.langfuse.com os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..." os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..." os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region # os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 US region # your Hugging Face token os.environ["HF_TOKEN"] = "hf_..." ``` With the environment variables set, we can now initialize the Langfuse client. `get_client()` initializes the Langfuse client using the credentials provided in the environment variables. ```python from langfuse import get_client langfuse = get_client() # Verify connection if langfuse.auth_check(): print("Langfuse client is authenticated and ready!") else: print("Authentication failed. Please check your credentials and host.") ``` ### Step 3: Initialize the `SmolagentsInstrumentor` Initialize the `SmolagentsInstrumentor` before your application code. ```python from openinference.instrumentation.smolagents import SmolagentsInstrumentor SmolagentsInstrumentor().instrument() ``` ### Step 4: Run your smolagent ```python from smolagents import ( CodeAgent, ToolCallingAgent, WebSearchTool, VisitWebpageTool, InferenceClientModel, ) model = InferenceClientModel( model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" ) search_agent = ToolCallingAgent( tools=[WebSearchTool(), VisitWebpageTool()], model=model, name="search_agent", description="This is an agent that can do web search.", ) manager_agent = CodeAgent( tools=[], model=model, managed_agents=[search_agent], ) manager_agent.run( "How can Langfuse be used to monitor and improve the reasoning and decision-making of smolagents when they execute multi-step tasks, like dynamically adjusting a recipe based on user feedback or available ingredients?" ) ``` ### Step 5: View Traces in Langfuse After running the agent, you can view the traces generated by your smolagents application in [Langfuse](https://cloud.langfuse.com). You should see detailed steps of the LLM interactions, which can help you debug and optimize your AI agent. ![smolagents example trace](https://langfuse.com/images/cookbook/integration-smolagents/smolagent_example_trace.png) _[Public example trace in Langfuse](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/ce5160f9bfd5a6cd63b07d2bfcec6f54?timestamp=2025-02-11T09%3A25%3A45.163Z&display=details)_
smolagents/docs/source/en/tutorials/inspect_runs.md/0
{ "file_path": "smolagents/docs/source/en/tutorials/inspect_runs.md", "repo_id": "smolagents", "token_count": 2101 }
243
# Inspecting runs with OpenTelemetry

[[open-in-colab]]

> [!TIP]
> If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).

### Why log your agent runs?

Agent runs are complicated to debug.

Validating that a run went properly is hard, since agent workflows are [unpredictable by design](../conceptual_guides/intro_agents) (if they were predictable, you'd just be using good old code).

And inspecting a run is hard as well: multi-step agents tend to quickly fill a console with logs, and most of the errors are just "LLM dumb" kind of errors, from which the LLM auto-corrects in the next step by writing better code or tool calls.

So using instrumentation to record agent runs is necessary in production for later inspection and monitoring!

We've adopted the [OpenTelemetry](https://opentelemetry.io/) standard for instrumenting agent runs.

This means that you can just run some instrumentation code, then run your agents normally, and everything gets logged into your platform.

Here is how it goes:

First install the required packages. Here we install [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) because that's a good solution to collect and inspect the logs, but there are other OpenTelemetry-compatible platforms that you could use for this collection & inspection part.

```shell
pip install smolagents
pip install arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents
```

Then run the collector in the background.

```shell
python -m phoenix.server.main serve
```

Finally, set up `SmolagentsInstrumentor` to trace your agents and send the traces to Phoenix at the endpoint defined below.

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

endpoint = "http://0.0.0.0:6006/v1/traces"
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))

SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)
```

Then you can run your agents!

```py
from smolagents import (
    CodeAgent,
    ToolCallingAgent,
    WebSearchTool,
    VisitWebpageTool,
    InferenceClientModel,
)

model = InferenceClientModel()

managed_agent = ToolCallingAgent(
    tools=[WebSearchTool(), VisitWebpageTool()],
    model=model,
    name="managed_agent",
    description="This is an agent that can do web search.",
)

manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[managed_agent],
)
manager_agent.run(
    "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?"
)
```

You can then navigate to `http://0.0.0.0:6006/projects/` to inspect your run!

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.png">

You can see that the CodeAgent called its managed ToolCallingAgent (by the way, the managed agent could have been a CodeAgent as well) to ask it to run the web search for the U.S. 2024 growth rate. Then the managed agent returned its report and the manager agent acted upon it to calculate the economy doubling time! Sweet, isn't it?
smolagents/docs/source/hi/tutorials/inspect_runs.md/0
{ "file_path": "smolagents/docs/source/hi/tutorials/inspect_runs.md", "repo_id": "smolagents", "token_count": 3246 }
244
# Agents - Guided tour

[[open-in-colab]]

In this guided tour, you will learn how to build an agent, how to run it, and how to customize it to make it work better for your use case.

> [!TIP]
> Translator's note: the standard Chinese term for "agent" is "智能体". The original Chinese version of this page keeps the English word "agent" untranslated for a smoother reading experience. (In a mostly Chinese article, it's easier to notice the English. Attention Is All You Need!)

> [!TIP]
> The Chinese community has published an introduction and hands-on video walkthrough of smolagents (source: [Issue#80](https://github.com/huggingface/smolagents/issues/80)); you can watch it [here](https://www.youtube.com/watch?v=wwN3oAugc4c)!

### Building your agent

To initialize a minimal agent, you need at least these two arguments:

- `model`, a text-generation model to power your agent - because an agent is different from a simple LLM: it is a system that uses an LLM as its engine. You can use any of these options:
    - [`TransformersModel`] uses a pre-initialized `transformers` pipeline to run inference on your local machine
    - [`InferenceClientModel`] leverages `huggingface_hub.InferenceClient` under the hood
    - [`LiteLLMModel`] lets you call 100+ different models through [LiteLLM](https://docs.litellm.ai/)!
    - [`AzureOpenAIServerModel`] allows you to use OpenAI models deployed in [Azure](https://azure.microsoft.com/en-us/products/ai-services/openai-service).
    - [`MLXModel`] creates an [mlx-lm](https://pypi.org/project/mlx-lm/) pipeline to run inference on your local machine.

- `tools`, a list of `Tools` that the agent can use to solve the task. It can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`.

Once you have these two arguments, `tools` and `model`, you can create an agent and run it. You can use any LLM you like, whether through the [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), [LiteLLM](https://www.litellm.ai/), [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service), or [mlx-lm](https://pypi.org/project/mlx-lm/).

<hfoptions id="选择一个LLM">
<hfoption id="Hugging Face API">

The Hugging Face API can be used for free without a token, but then it will be rate-limited.

To access gated models or raise your rate limits with a PRO account, you need to set the environment variable `HF_TOKEN` or pass the `token` variable when initializing `InferenceClientModel`.

```python
from smolagents import CodeAgent, InferenceClientModel

model_id = "meta-llama/Llama-3.3-70B-Instruct"

model = InferenceClientModel(model_id=model_id, token="<YOUR_HUGGINGFACEHUB_API_TOKEN>")
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="本地Transformers模型">

```python
# !pip install smolagents[transformers]
from smolagents import CodeAgent, TransformersModel

model_id = "meta-llama/Llama-3.2-3B-Instruct"

model = TransformersModel(model_id=model_id)
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="OpenAI或Anthropic API">

To use `LiteLLMModel`, you need to set the environment variable `ANTHROPIC_API_KEY` or `OPENAI_API_KEY`, or pass the `api_key` variable at initialization.

```python
# !pip install smolagents[litellm]
from smolagents import CodeAgent, LiteLLMModel

model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", api_key="YOUR_ANTHROPIC_API_KEY")  # You could also use 'gpt-4o'
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="Ollama">

```python
# !pip install smolagents[litellm]
from smolagents import CodeAgent, LiteLLMModel

model = LiteLLMModel(
    model_id="ollama_chat/llama3.2",    # this model is a bit weak for agentic behaviours
    api_base="http://localhost:11434",  # replace with a remote OpenAI-compatible server if needed
    api_key="YOUR_API_KEY",             # replace with an API key if needed
    num_ctx=8192                        # https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator
)

agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="Azure OpenAI">

To connect to Azure OpenAI, you can either use `AzureOpenAIServerModel` directly, or use `LiteLLMModel` configured accordingly.

When initializing an `AzureOpenAIServerModel`
instance, you need to pass the model deployment name, and then choose either of the following: 1. pass the `azure_endpoint`, `api_key`, and `api_version` arguments; 2. set the environment variables `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`.

```python
# !pip install smolagents[openai]
from smolagents import CodeAgent, AzureOpenAIServerModel

model = AzureOpenAIServerModel(model_id="gpt-4o-mini")
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```

You can also configure `LiteLLMModel` to connect to Azure OpenAI as follows:

- pass the model deployment name as the `model_id` argument, and make sure to prefix it with `azure/`
- make sure to set the environment variable `AZURE_API_VERSION`
- either: 1. pass the `api_base` and `api_key` arguments; 2. set the environment variables `AZURE_API_KEY` and `AZURE_API_BASE`

```python
import os
from smolagents import CodeAgent, LiteLLMModel

AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo-16k-deployment" # example of deployment name

os.environ["AZURE_API_KEY"] = "" # api_key
os.environ["AZURE_API_BASE"] = "" # "https://example-endpoint.openai.azure.com"
os.environ["AZURE_API_VERSION"] = "" # "2024-10-01-preview"

model = LiteLLMModel(model_id="azure/" + AZURE_OPENAI_CHAT_DEPLOYMENT_NAME)
agent = CodeAgent(tools=[], model=model, add_base_tools=True)

agent.run(
    "Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="mlx-lm">

```python
# !pip install smolagents[mlx-lm]
from smolagents import CodeAgent, MLXModel

mlx_model = MLXModel("mlx-community/Qwen2.5-Coder-32B-Instruct-4bit")
agent = CodeAgent(model=mlx_model, tools=[], add_base_tools=True)

agent.run("Could you give me the 118th number in the Fibonacci sequence?")
```
</hfoption>
</hfoptions>

#### CodeAgent and ToolCallingAgent

[`CodeAgent`] is our default agent. It will write and execute Python code snippets at each step.

By default, the execution is done in your local environment.

This should be safe because the only functions that can be called are the tools you provided (especially if the only tools are ones from Hugging Face) and a set of predefined safe functions such as `print` or functions from the `math` module, so you are already limited in what can be executed.

The Python interpreter also doesn't allow imports outside of a safe list by default, so the most obvious attacks shouldn't be an issue.

You can authorize additional imports by passing the authorized modules as a list of strings to the argument `additional_authorized_imports` upon initialization of your [`CodeAgent`]:

```py
from smolagents import CodeAgent, InferenceClientModel

model = InferenceClientModel()
agent = CodeAgent(tools=[], model=model, additional_authorized_imports=['requests', 'bs4'])
agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
```

> [!WARNING]
> The LLM can generate arbitrary code that will then be executed: do not add any unsafe imports!
Execution will stop if the generated code tries to perform an illegal operation, or if there is a regular Python error in it.

You can also use the [E2B code executor](https://e2b.dev/docs#what-is-e2-b) or Docker instead of a local Python interpreter. For E2B, first [set the `E2B_API_KEY` environment variable](https://e2b.dev/dashboard?tab=keys), then pass `executor_type="e2b"` when initializing the agent. For Docker, pass `executor_type="docker"` at initialization.

> [!TIP]
> Learn more about code execution [in this tutorial](tutorials/secure_code_execution).

We also support the widely-used way of writing actions as JSON-like blobs: [`ToolCallingAgent`]. It works much in the same way as [`CodeAgent`], of course without `additional_authorized_imports` since it doesn't execute code:

```py
from smolagents import ToolCallingAgent

agent = ToolCallingAgent(tools=[], model=model)
agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
```

### Inspecting an agent run

Here are a few useful attributes to inspect what happened after a run:
- `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything gets stored in a dictionary that is then appended to `agent.logs`.
- Running `agent.write_memory_to_messages()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. This method goes over each step of the log and only stores what it's interested in as a message: for instance, it will save the system prompt and task as separate messages, then for each step it will store the LLM output as one message and the tool call output as another. Use this if you want a higher-level view - but not every log will be transcribed by this method.

## Tools

A tool is an atomic function used by an agent. To be used by an LLM, it also needs a few attributes that constitute its API and will be used to describe to the LLM how to call this tool:
- A name
- A description
- Input types and descriptions
- An output type

You can, for instance, check the [`PythonInterpreterTool`]: it has a name, a description, input descriptions, an output type, and a `forward` method to perform the action.

When the agent is initialized, the tool attributes are used to generate a tool description which is embedded in the agent's system prompt. This lets the agent know which tools it can use and why.

### Default toolbox

`smolagents` ships with a default toolbox for empowering agents, which you can add to your agent upon initialization with the argument `add_base_tools=True`:

- **DuckDuckGo web search**: performs a web search using the DuckDuckGo browser.
- **Python code interpreter**: runs LLM-generated Python code in a secure environment. This tool will only be added to [`ToolCallingAgent`] if you initialize it with `add_base_tools=True`, since code-based agents can already natively execute Python code
- **Transcriber**: a speech-to-text pipeline built on Whisper-Turbo that transcribes audio to text.

You can manually use a tool by calling the [`load_tool`] function with a task to perform.

```python
from smolagents import WebSearchTool

search_tool = WebSearchTool()
print(search_tool("Who's the current president of Russia?"))
```

### Create a new tool

You can create your own tool for use cases not covered by the default Hugging Face tools.
For example, let's create a tool that returns the most downloaded model for a given task from the Hub.

You'll start with the code below.

```python
from huggingface_hub import list_models

task = "text-classification"

most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(most_downloaded_model.id)
```

This code can quickly be converted into a tool, just by wrapping it in a function and adding the `tool` decorator.
This is not the only way to build a tool: you can directly define it as a subclass of [`Tool`], which gives you more flexibility, for instance the possibility to initialize heavy class attributes.

Let's see how it works for both options:

<hfoptions id="构建工具">
<hfoption id="使用@tool装饰一个函数">

```py
from smolagents import tool

@tool
def model_download_tool(task: str) -> str:
    """
    This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub.
    It returns the name of the checkpoint.

    Args:
        task: The task for which to get the download count.
    """
    most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
    return most_downloaded_model.id
```

The function needs:
- A clear name. The name should be descriptive enough of what this tool does to help the LLM powering the agent. Since this tool returns the model with the most downloads for a task, let's name it `model_download_tool`.
- Type hints on both inputs and output
- A description that includes an 'Args:' part where each argument is described (without a type indication this time, it will be pulled from the type hint). Same as for the tool name, this description is an instruction manual for the LLM powering your agent, so do not neglect it.

All these elements will be automatically embedded in the agent's system prompt upon initialization: so strive to make them as clear as possible!

> [!TIP]
> This definition format is the same as tool schemas used in `apply_chat_template`, the only difference being the added `tool` decorator: read more about our tool use API [here](https://huggingface.co/blog/unified-tool-use#passing-tools-to-a-chat-template).
</hfoption>
<hfoption id="子类化Tool">

```py
from smolagents import Tool

class ModelDownloadTool(Tool):
    name = "model_download_tool"
    description = "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint."
    inputs = {"task": {"type": "string", "description": "The task for which to get the download count."}}
    output_type = "string"

    def forward(self, task: str) -> str:
        most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
        return most_downloaded_model.id
```

The subclass needs the following attributes:
- A clear `name`. The name should be descriptive enough of what this tool does to help the LLM powering the agent. Since this tool returns the model with the most downloads for a task, let's name it `model_download_tool`.
- A `description`. As with the `name`, this description is an instruction manual for the LLM powering your agent, so do not neglect it.
- Input types and descriptions
- An output type

All these attributes will be automatically embedded in the agent's system prompt upon initialization: so strive to make them as clear as possible!
</hfoption>
</hfoptions>

Then you can directly initialize your agent:

```py
from smolagents import CodeAgent, InferenceClientModel
agent = CodeAgent(tools=[model_download_tool], model=InferenceClientModel())
agent.run(
    "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
)
```

You will get the following logs:

```text
╭──────────────────────────────────────── New run ─────────────────────────────────────────╮
│                                                                                           │
│ Can you give me the name of the model that has the most downloads in the 'text-to-video' │
│ task on the Hugging Face Hub?                                                             │
│                                                                                           │
╰─ InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct ──────────────────────────────────╯
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 0 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
╭─ Executing this code: ───────────────────────────────────────────────────────────────────╮
│   1 model_name = model_download_tool(task="text-to-video")                               │
│   2 print(model_name)                                                                    │
╰──────────────────────────────────────────────────────────────────────────────────────────╯
Execution logs:
ByteDance/AnimateDiff-Lightning

Out: None
[Step 0: Duration 0.27 seconds| Input tokens: 2,069 | Output tokens: 60]
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
╭─ Executing this code: ───────────────────────────────────────────────────────────────────╮
│   1 final_answer("ByteDance/AnimateDiff-Lightning")                                      │
╰──────────────────────────────────────────────────────────────────────────────────────────╯
Out - Final answer: ByteDance/AnimateDiff-Lightning
[Step 1: Duration 0.10 seconds| Input tokens: 4,288 | Output tokens: 148]
Out[20]: 'ByteDance/AnimateDiff-Lightning'
```

> [!TIP]
> Read more about tools in the [dedicated tutorial](./tutorials/tools#what-is-a-tool-and-how-to-build-one).

## Multi-agents

Multi-agent systems were introduced with Microsoft's framework [Autogen](https://huggingface.co/papers/2308.08155).

In this type of framework, you have several agents working together to solve your task instead of only one.

It empirically yields better performance on most benchmarks. The reason for this better performance is conceptually simple: for many tasks, rather than using a do-it-all system, you would prefer to specialize units on sub-tasks. Here, having agents with separate tool sets and memories allows efficient specialization. For instance, why fill the memory of the code-generating agent with all the content of webpages visited by the web search agent? It's better to keep them separate.

You can easily build hierarchical multi-agent systems with `smolagents`.

To do so, encapsulate the agent in a [`ManagedAgent`] object. This object needs the arguments `agent`, `name`, and `description`, which will then be embedded in the manager agent's system prompt to let it know how to call this managed agent, just as we do for tools.

Here's an example of making an agent that manages a specific web search agent using our [`WebSearchTool`]:

```py
from smolagents import CodeAgent, InferenceClientModel, WebSearchTool, ManagedAgent

model = InferenceClientModel()

web_agent = CodeAgent(tools=[WebSearchTool()], model=model)

managed_web_agent = ManagedAgent(
    agent=web_agent,
    name="web_search",
    description="Runs web searches for you. Give it your query as an argument."
)

manager_agent = CodeAgent(
    tools=[], model=model, managed_agents=[managed_web_agent]
)

manager_agent.run("Who is the CEO of Hugging Face?")
```

> [!TIP]
> For an in-depth example of an efficient multi-agent implementation, see [how we pushed our multi-agent system to the top of the GAIA leaderboard](https://huggingface.co/blog/beating-gaia).

## Talk with your agent and visualize its thoughts in a cool Gradio interface

You can use `GradioUI` to interactively submit tasks to your agent and observe its thought and execution process; here is an example:

```py
from smolagents import (
    load_tool,
    CodeAgent,
    InferenceClientModel,
    GradioUI
)

# Import tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image")

model = InferenceClientModel()

# Initialize the agent with the image generation tool
agent = CodeAgent(tools=[image_generation_tool], model=model)

GradioUI(agent).launch()
```

Under the hood, when the user types a new answer, the agent is relaunched with `agent.run(user_request, reset=False)`.
The `reset=False` flag means the agent's memory is not flushed before launching this new task, which lets the conversation go on.

You can also use this `reset=False` argument to keep the conversation going in any other agentic application.

## Next steps

Finally, once you have configured your agent to your needs, you can share it to the Hub!

```py
agent.push_to_hub("m-ric/my_agent")
```

Similarly, to load an agent that has been pushed to the Hub, provided you trust the code of its tools, use:

```py
agent.from_hub("m-ric/my_agent", trust_remote_code=True)
```

For more in-depth usage, you will then want to check out our tutorials:
- [an explanation of how our code agents work](./tutorials/secure_code_execution)
- [this guide on how to build good agents](./tutorials/building_good_agents)
- [the in-depth guide on tool usage](./tutorials/tools)
smolagents/docs/source/zh/guided_tour.md/0
{ "file_path": "smolagents/docs/source/zh/guided_tour.md", "repo_id": "smolagents", "token_count": 10501 }
245
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from phoenix.otel import register


register()
SmolagentsInstrumentor().instrument(skip_dep_check=True)


from smolagents import (
    CodeAgent,
    InferenceClientModel,
    ToolCallingAgent,
    VisitWebpageTool,
    WebSearchTool,
)


# Then we run the agentic part!
model = InferenceClientModel(provider="nebius")

search_agent = ToolCallingAgent(
    tools=[WebSearchTool(), VisitWebpageTool()],
    model=model,
    name="search_agent",
    description="This is an agent that can do web search.",
    return_full_result=True,
)

manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[search_agent],
    return_full_result=True,
)
run_result = manager_agent.run(
    "If the US keeps its 2024 growth rate, how many years would it take for the GDP to double?"
)
print("Here is the token usage for the manager agent", run_result.token_usage)
print("Here is the timing information for the manager agent:", run_result.timing)
smolagents/examples/inspect_multiagent_run.py/0
{ "file_path": "smolagents/examples/inspect_multiagent_run.py", "repo_id": "smolagents", "token_count": 335 }
246
import base64 import json import mimetypes import os import uuid from io import BytesIO import PIL.Image import requests from dotenv import load_dotenv from huggingface_hub import InferenceClient from smolagents import Tool, tool load_dotenv(override=True) def process_images_and_text(image_path, query, client): from transformers import AutoProcessor messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": query}, ], }, ] idefics_processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-chatty") prompt_with_template = idefics_processor.apply_chat_template(messages, add_generation_prompt=True) # load images from local directory # encode images to strings which can be sent to the endpoint def encode_local_image(image_path): # load image image = PIL.Image.open(image_path).convert("RGB") # Convert the image to a base64 string buffer = BytesIO() image.save(buffer, format="JPEG") # Use the appropriate format (e.g., JPEG, PNG) base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8") # add string formatting required by the endpoint image_string = f"data:image/jpeg;base64,{base64_image}" return image_string image_string = encode_local_image(image_path) prompt_with_images = prompt_with_template.replace("<image>", "![]({}) ").format(image_string) payload = { "inputs": prompt_with_images, "parameters": { "return_full_text": False, "max_new_tokens": 200, }, } return json.loads(client.post(json=payload).decode())[0] # Function to encode the image def encode_image(image_path): if image_path.startswith("http"): user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0" request_kwargs = { "headers": {"User-Agent": user_agent}, "stream": True, } # Send a HTTP request to the URL response = requests.get(image_path, **request_kwargs) response.raise_for_status() content_type = response.headers.get("content-type", "") extension = mimetypes.guess_extension(content_type) if extension is None: extension = ".download" fname = str(uuid.uuid4()) + extension download_path = os.path.abspath(os.path.join("downloads", fname)) with open(download_path, "wb") as fh: for chunk in response.iter_content(chunk_size=512): fh.write(chunk) image_path = download_path with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def resize_image(image_path): img = PIL.Image.open(image_path) width, height = img.size img = img.resize((int(width / 2), int(height / 2))) new_image_path = f"resized_{image_path}" img.save(new_image_path) return new_image_path class VisualQATool(Tool): name = "visualizer" description = "A tool that can answer questions about attached images." inputs = { "image_path": { "description": "The path to the image on which to answer the question", "type": "string", }, "question": {"description": "the question to answer", "type": "string", "nullable": True}, } output_type = "string" client = InferenceClient("HuggingFaceM4/idefics2-8b-chatty") def forward(self, image_path: str, question: str | None = None) -> str: output = "" add_note = False if not question: add_note = True question = "Please write a detailed caption for this image." 
try: output = process_images_and_text(image_path, question, self.client) except Exception as e: print(e) if "Payload Too Large" in str(e): new_image_path = resize_image(image_path) output = process_images_and_text(new_image_path, question, self.client) if add_note: output = ( f"You did not provide a particular question, so here is a detailed caption for the image: {output}" ) return output @tool def visualizer(image_path: str, question: str | None = None) -> str: """A tool that can answer questions about attached images. Args: image_path: The path to the image on which to answer the question. This should be a local path to downloaded image. question: The question to answer. """ import mimetypes import os import requests from .visual_qa import encode_image add_note = False if not question: add_note = True question = "Please write a detailed caption for this image." if not isinstance(image_path, str): raise Exception("You should provide at least `image_path` string argument to this tool!") mime_type, _ = mimetypes.guess_type(image_path) base64_image = encode_image(image_path) payload = { "model": "gpt-4o", "messages": [ { "role": "user", "content": [ {"type": "text", "text": question}, {"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{base64_image}"}}, ], } ], "max_tokens": 1000, } headers = {"Content-Type": "application/json", "Authorization": f"Bearer {os.getenv('OPENAI_API_KEY')}"} response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) try: output = response.json()["choices"][0]["message"]["content"] except Exception: raise Exception(f"Response format unexpected: {response.json()}") if add_note: output = f"You did not provide a particular question, so here is a detailed caption for the image: {output}" return output
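# --- Hypothetical usage sketch (not part of the original file) ---------------
# The image path and question below are made up for illustration only; the
# @tool-decorated `visualizer` requires OPENAI_API_KEY to be set, while
# VisualQATool relies on the Hugging Face Inference API.
if __name__ == "__main__":
    caption = visualizer("downloads/example_chart.png")  # no question -> detailed caption
    answer = visualizer("downloads/example_chart.png", "What is the highest value shown?")
    print(caption, answer, sep="\n")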
smolagents/examples/open_deep_research/scripts/visual_qa.py/0
{ "file_path": "smolagents/examples/open_deep_research/scripts/visual_qa.py", "repo_id": "smolagents", "token_count": 2558 }
247
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import pathlib import tempfile import uuid from io import BytesIO from typing import Any import PIL.Image import requests from .utils import _is_package_available logger = logging.getLogger(__name__) class AgentType: """ Abstract class to be reimplemented to define types that can be returned by agents. These objects serve three purposes: - They behave as they were the type they're meant to be, e.g., a string for text, a PIL.Image.Image for images - They can be stringified: str(object) in order to return a string defining the object - They should be displayed correctly in ipython notebooks/colab/jupyter """ def __init__(self, value): self._value = value def __str__(self): return self.to_string() def to_raw(self): logger.error( "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable" ) return self._value def to_string(self) -> str: logger.error( "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable" ) return str(self._value) class AgentText(AgentType, str): """ Text type returned by the agent. Behaves as a string. """ def to_raw(self): return self._value def to_string(self): return str(self._value) class AgentImage(AgentType, PIL.Image.Image): """ Image type returned by the agent. Behaves as a PIL.Image.Image. """ def __init__(self, value): AgentType.__init__(self, value) PIL.Image.Image.__init__(self) self._path = None self._raw = None self._tensor = None if isinstance(value, AgentImage): self._raw, self._path, self._tensor = value._raw, value._path, value._tensor elif isinstance(value, PIL.Image.Image): self._raw = value elif isinstance(value, bytes): self._raw = PIL.Image.open(BytesIO(value)) elif isinstance(value, (str, pathlib.Path)): self._path = value else: try: import torch if isinstance(value, torch.Tensor): self._tensor = value import numpy as np if isinstance(value, np.ndarray): self._tensor = torch.from_numpy(value) except ModuleNotFoundError: pass if self._path is None and self._raw is None and self._tensor is None: raise TypeError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") def _ipython_display_(self, include=None, exclude=None): """ Displays correctly this type in an ipython notebook (ipython, colab, jupyter, ...) """ from IPython.display import Image, display display(Image(self.to_string())) def to_raw(self): """ Returns the "raw" version of that object. In the case of an AgentImage, it is a PIL.Image.Image. """ if self._raw is not None: return self._raw if self._path is not None: self._raw = PIL.Image.open(self._path) return self._raw if self._tensor is not None: import numpy as np array = self._tensor.cpu().detach().numpy() return PIL.Image.fromarray((255 - array * 255).astype(np.uint8)) def to_string(self): """ Returns the stringified version of that object. In the case of an AgentImage, it is a path to the serialized version of the image. 
""" if self._path is not None: return self._path if self._raw is not None: directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + ".png") self._raw.save(self._path, format="png") return self._path if self._tensor is not None: import numpy as np array = self._tensor.cpu().detach().numpy() # There is likely simpler than load into image into save img = PIL.Image.fromarray((255 - array * 255).astype(np.uint8)) directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + ".png") img.save(self._path, format="png") return self._path def save(self, output_bytes, format: str = None, **params): """ Saves the image to a file. Args: output_bytes (bytes): The output bytes to save the image to. format (str): The format to use for the output image. The format is the same as in PIL.Image.save. **params: Additional parameters to pass to PIL.Image.save. """ img = self.to_raw() img.save(output_bytes, format=format, **params) class AgentAudio(AgentType, str): """ Audio type returned by the agent. """ def __init__(self, value, samplerate=16_000): if not _is_package_available("soundfile") or not _is_package_available("torch"): raise ModuleNotFoundError( "Please install 'audio' extra to use AgentAudio: `pip install 'smolagents[audio]'`" ) import numpy as np import torch super().__init__(value) self._path = None self._tensor = None self.samplerate = samplerate if isinstance(value, (str, pathlib.Path)): self._path = value elif isinstance(value, torch.Tensor): self._tensor = value elif isinstance(value, tuple): self.samplerate = value[0] if isinstance(value[1], np.ndarray): self._tensor = torch.from_numpy(value[1]) else: self._tensor = torch.tensor(value[1]) else: raise ValueError(f"Unsupported audio type: {type(value)}") def _ipython_display_(self, include=None, exclude=None): """ Displays correctly this type in an ipython notebook (ipython, colab, jupyter, ...) """ from IPython.display import Audio, display display(Audio(self.to_string(), rate=self.samplerate)) def to_raw(self): """ Returns the "raw" version of that object. It is a `torch.Tensor` object. """ import soundfile as sf if self._tensor is not None: return self._tensor import torch if self._path is not None: if "://" in str(self._path): response = requests.get(self._path) response.raise_for_status() tensor, self.samplerate = sf.read(BytesIO(response.content)) else: tensor, self.samplerate = sf.read(self._path) self._tensor = torch.tensor(tensor) return self._tensor def to_string(self): """ Returns the stringified version of that object. In the case of an AgentAudio, it is a path to the serialized version of the audio. 
""" import soundfile as sf if self._path is not None: return self._path if self._tensor is not None: directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + ".wav") sf.write(self._path, self._tensor, samplerate=self.samplerate) return self._path _AGENT_TYPE_MAPPING = {"string": AgentText, "image": AgentImage, "audio": AgentAudio} def handle_agent_input_types(*args, **kwargs): args = [(arg.to_raw() if isinstance(arg, AgentType) else arg) for arg in args] kwargs = {k: (v.to_raw() if isinstance(v, AgentType) else v) for k, v in kwargs.items()} return args, kwargs def handle_agent_output_types(output: Any, output_type: str | None = None) -> Any: if output_type in _AGENT_TYPE_MAPPING: # If the class has defined outputs, we can map directly according to the class definition decoded_outputs = _AGENT_TYPE_MAPPING[output_type](output) return decoded_outputs # If the class does not have defined output, then we map according to the type if isinstance(output, str): return AgentText(output) if isinstance(output, PIL.Image.Image): return AgentImage(output) try: import torch if isinstance(output, torch.Tensor): return AgentAudio(output) except ModuleNotFoundError: pass return output __all__ = ["AgentType", "AgentImage", "AgentText", "AgentAudio"]
smolagents/src/smolagents/agent_types.py/0
{ "file_path": "smolagents/src/smolagents/agent_types.py", "repo_id": "smolagents", "token_count": 3867 }
248
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import base64 import importlib.util import inspect import json import keyword import os import re import time from functools import lru_cache from io import BytesIO from pathlib import Path from textwrap import dedent from typing import TYPE_CHECKING, Any import jinja2 if TYPE_CHECKING: from smolagents.memory import AgentLogger __all__ = ["AgentError"] @lru_cache def _is_package_available(package_name: str) -> bool: return importlib.util.find_spec(package_name) is not None BASE_BUILTIN_MODULES = [ "collections", "datetime", "itertools", "math", "queue", "random", "re", "stat", "statistics", "time", "unicodedata", ] def escape_code_brackets(text: str) -> str: """Escapes square brackets in code segments while preserving Rich styling tags.""" def replace_bracketed_content(match): content = match.group(1) cleaned = re.sub( r"bold|red|green|blue|yellow|magenta|cyan|white|black|italic|dim|\s|#[0-9a-fA-F]{6}", "", content ) return f"\\[{content}\\]" if cleaned.strip() else f"[{content}]" return re.sub(r"\[([^\]]*)\]", replace_bracketed_content, text) class AgentError(Exception): """Base class for other agent-related exceptions""" def __init__(self, message, logger: "AgentLogger"): super().__init__(message) self.message = message logger.log_error(message) def dict(self) -> dict[str, str]: return {"type": self.__class__.__name__, "message": str(self.message)} class AgentParsingError(AgentError): """Exception raised for errors in parsing in the agent""" pass class AgentExecutionError(AgentError): """Exception raised for errors in execution in the agent""" pass class AgentMaxStepsError(AgentError): """Exception raised for errors in execution in the agent""" pass class AgentToolCallError(AgentExecutionError): """Exception raised for errors when incorrect arguments are passed to the tool""" pass class AgentToolExecutionError(AgentExecutionError): """Exception raised for errors when executing a tool""" pass class AgentGenerationError(AgentError): """Exception raised for errors in generation in the agent""" pass def make_json_serializable(obj: Any) -> Any: """Recursive function to make objects JSON serializable""" if obj is None: return None elif isinstance(obj, (str, int, float, bool)): # Try to parse string as JSON if it looks like a JSON object/array if isinstance(obj, str): try: if (obj.startswith("{") and obj.endswith("}")) or (obj.startswith("[") and obj.endswith("]")): parsed = json.loads(obj) return make_json_serializable(parsed) except json.JSONDecodeError: pass return obj elif isinstance(obj, (list, tuple)): return [make_json_serializable(item) for item in obj] elif isinstance(obj, dict): return {str(k): make_json_serializable(v) for k, v in obj.items()} elif hasattr(obj, "__dict__"): # For custom objects, convert their __dict__ to a serializable format return {"_type": obj.__class__.__name__, **{k: make_json_serializable(v) for k, v in 
obj.__dict__.items()}} else: # For any other type, convert to string return str(obj) def parse_json_blob(json_blob: str) -> tuple[dict[str, str], str]: "Extracts the JSON blob from the input and returns the JSON data and the rest of the input." try: first_accolade_index = json_blob.find("{") last_accolade_index = [a.start() for a in list(re.finditer("}", json_blob))][-1] json_str = json_blob[first_accolade_index : last_accolade_index + 1] json_data = json.loads(json_str, strict=False) return json_data, json_blob[:first_accolade_index] except IndexError: raise ValueError("The model output does not contain any JSON blob.") except json.JSONDecodeError as e: place = e.pos if json_blob[place - 1 : place + 2] == "},\n": raise ValueError( "JSON is invalid: you probably tried to provide multiple tool calls in one action. PROVIDE ONLY ONE TOOL CALL." ) raise ValueError( f"The JSON blob you used is invalid due to the following error: {e}.\n" f"JSON blob was: {json_blob}, decoding failed on that specific part of the blob:\n" f"'{json_blob[place - 4 : place + 5]}'." ) def extract_code_from_text(text: str, code_block_tags: tuple[str, str]) -> str | None: """Extract code from the LLM's output.""" pattern = rf"{code_block_tags[0]}(.*?){code_block_tags[1]}" matches = re.findall(pattern, text, re.DOTALL) if matches: return "\n\n".join(match.strip() for match in matches) return None def parse_code_blobs(text: str, code_block_tags: tuple[str, str]) -> str: """Extract code blocs from the LLM's output. If a valid code block is passed, it returns it directly. Args: text (`str`): LLM's output text to parse. Returns: `str`: Extracted code block. Raises: ValueError: If no valid code block is found in the text. """ matches = extract_code_from_text(text, code_block_tags) if not matches: # Fallback to markdown pattern matches = extract_code_from_text(text, ("```(?:python|py)", "\n```")) if matches: return matches # Maybe the LLM outputted a code blob directly try: ast.parse(text) return text except SyntaxError: pass if "final" in text and "answer" in text: raise ValueError( dedent( f""" Your code snippet is invalid, because the regex pattern {code_block_tags[0]}(.*?){code_block_tags[1]} was not found in it. Here is your code snippet: {text} It seems like you're trying to return the final answer, you can do it as follows: {code_block_tags[0]} final_answer("YOUR FINAL ANSWER HERE") {code_block_tags[1]} """ ).strip() ) raise ValueError( dedent( f""" Your code snippet is invalid, because the regex pattern {code_block_tags[0]}(.*?){code_block_tags[1]} was not found in it. 
Here is your code snippet: {text} Make sure to include code with the correct pattern, for instance: Thoughts: Your thoughts {code_block_tags[0]} # Your python code here {code_block_tags[1]} """ ).strip() ) MAX_LENGTH_TRUNCATE_CONTENT = 20000 def truncate_content(content: str, max_length: int = MAX_LENGTH_TRUNCATE_CONTENT) -> str: if len(content) <= max_length: return content else: return ( content[: max_length // 2] + f"\n..._This content has been truncated to stay below {max_length} characters_...\n" + content[-max_length // 2 :] ) class ImportFinder(ast.NodeVisitor): def __init__(self): self.packages = set() def visit_Import(self, node): for alias in node.names: # Get the base package name (before any dots) base_package = alias.name.split(".")[0] self.packages.add(base_package) def visit_ImportFrom(self, node): if node.module: # for "from x import y" statements # Get the base package name (before any dots) base_package = node.module.split(".")[0] self.packages.add(base_package) def instance_to_source(instance, base_cls=None): """Convert an instance to its class source code representation.""" cls = instance.__class__ class_name = cls.__name__ # Start building class lines class_lines = [] if base_cls: class_lines.append(f"class {class_name}({base_cls.__name__}):") else: class_lines.append(f"class {class_name}:") # Add docstring if it exists and differs from base if cls.__doc__ and (not base_cls or cls.__doc__ != base_cls.__doc__): class_lines.append(f' """{cls.__doc__}"""') # Add class-level attributes class_attrs = { name: value for name, value in cls.__dict__.items() if not name.startswith("__") and not name == "_abc_impl" and not callable(value) and not (base_cls and hasattr(base_cls, name) and getattr(base_cls, name) == value) } for name, value in class_attrs.items(): if isinstance(value, str): # multiline value if "\n" in value: escaped_value = value.replace('"""', r"\"\"\"") # Escape triple quotes class_lines.append(f' {name} = """{escaped_value}"""') else: class_lines.append(f" {name} = {json.dumps(value)}") else: class_lines.append(f" {name} = {repr(value)}") if class_attrs: class_lines.append("") # Add methods methods = { name: func.__wrapped__ if hasattr(func, "__wrapped__") else func for name, func in cls.__dict__.items() if callable(func) and ( not base_cls or not hasattr(base_cls, name) or ( isinstance(func, (staticmethod, classmethod)) or (getattr(base_cls, name).__code__.co_code != func.__code__.co_code) ) ) } for name, method in methods.items(): method_source = get_source(method) # Clean up the indentation method_lines = method_source.split("\n") first_line = method_lines[0] indent = len(first_line) - len(first_line.lstrip()) method_lines = [line[indent:] for line in method_lines] method_source = "\n".join([" " + line if line.strip() else line for line in method_lines]) class_lines.append(method_source) class_lines.append("") # Find required imports using ImportFinder import_finder = ImportFinder() import_finder.visit(ast.parse("\n".join(class_lines))) required_imports = import_finder.packages # Build final code with imports final_lines = [] # Add base class import if needed if base_cls: final_lines.append(f"from {base_cls.__module__} import {base_cls.__name__}") # Add discovered imports for package in required_imports: final_lines.append(f"import {package}") if final_lines: # Add empty line after imports final_lines.append("") # Add the class code final_lines.extend(class_lines) return "\n".join(final_lines) def get_source(obj) -> str: """Get the source code of a class or 
callable object (e.g.: function, method). First attempts to get the source code using `inspect.getsource`. In a dynamic environment (e.g.: Jupyter, IPython), if this fails, falls back to retrieving the source code from the current interactive shell session. Args: obj: A class or callable object (e.g.: function, method) Returns: str: The source code of the object, dedented and stripped Raises: TypeError: If object is not a class or callable OSError: If source code cannot be retrieved from any source ValueError: If source cannot be found in IPython history Note: TODO: handle Python standard REPL """ if not (isinstance(obj, type) or callable(obj)): raise TypeError(f"Expected class or callable, got {type(obj)}") inspect_error = None try: # Handle dynamically created classes source = getattr(obj, "__source__", None) or inspect.getsource(obj) return dedent(source).strip() except OSError as e: # let's keep track of the exception to raise it if all further methods fail inspect_error = e try: import IPython shell = IPython.get_ipython() if not shell: raise ImportError("No active IPython shell found") all_cells = "\n".join(shell.user_ns.get("In", [])).strip() if not all_cells: raise ValueError("No code cells found in IPython session") tree = ast.parse(all_cells) for node in ast.walk(tree): if isinstance(node, (ast.ClassDef, ast.FunctionDef)) and node.name == obj.__name__: return dedent("\n".join(all_cells.split("\n")[node.lineno - 1 : node.end_lineno])).strip() raise ValueError(f"Could not find source code for {obj.__name__} in IPython history") except ImportError: # IPython is not available, let's just raise the original inspect error raise inspect_error except ValueError as e: # IPython is available but we couldn't find the source code, let's raise the error raise e from inspect_error def encode_image_base64(image): buffered = BytesIO() image.save(buffered, format="PNG") return base64.b64encode(buffered.getvalue()).decode("utf-8") def make_image_url(base64_image): return f"data:image/png;base64,{base64_image}" def make_init_file(folder: str | Path): os.makedirs(folder, exist_ok=True) # Create __init__ with open(os.path.join(folder, "__init__.py"), "w"): pass def is_valid_name(name: str) -> bool: return name.isidentifier() and not keyword.iskeyword(name) if isinstance(name, str) else False AGENT_GRADIO_APP_TEMPLATE = """import yaml import os from smolagents import GradioUI, {{ class_name }}, {{ agent_dict['model']['class'] }} # Get current directory path CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) {% for tool in tools.values() -%} from {{managed_agent_relative_path}}tools.{{ tool.name }} import {{ tool.__class__.__name__ }} as {{ tool.name | camelcase }} {% endfor %} {% for managed_agent in managed_agents.values() -%} from {{managed_agent_relative_path}}managed_agents.{{ managed_agent.name }}.app import agent_{{ managed_agent.name }} {% endfor %} model = {{ agent_dict['model']['class'] }}( {% for key in agent_dict['model']['data'] if key != 'class' -%} {{ key }}={{ agent_dict['model']['data'][key]|repr }}, {% endfor %}) {% for tool in tools.values() -%} {{ tool.name }} = {{ tool.name | camelcase }}() {% endfor %} with open(os.path.join(CURRENT_DIR, "prompts.yaml"), 'r') as stream: prompt_templates = yaml.safe_load(stream) {{ agent_name }} = {{ class_name }}( model=model, tools=[{% for tool_name in tools.keys() if tool_name != "final_answer" %}{{ tool_name }}{% if not loop.last %}, {% endif %}{% endfor %}], managed_agents=[{% for subagent_name in managed_agents.keys() %}agent_{{ 
subagent_name }}{% if not loop.last %}, {% endif %}{% endfor %}], {% for attribute_name, value in agent_dict.items() if attribute_name not in ["class", "model", "tools", "prompt_templates", "authorized_imports", "managed_agents", "requirements"] -%} {{ attribute_name }}={{ value|repr }}, {% endfor %}prompt_templates=prompt_templates ) if __name__ == "__main__": GradioUI({{ agent_name }}).launch() """.strip() def create_agent_gradio_app_template(): env = jinja2.Environment(loader=jinja2.BaseLoader(), undefined=jinja2.StrictUndefined) env.filters["repr"] = repr env.filters["camelcase"] = lambda value: "".join(word.capitalize() for word in value.split("_")) return env.from_string(AGENT_GRADIO_APP_TEMPLATE) class RateLimiter: """Simple rate limiter that enforces a minimum delay between consecutive requests. This class is useful for limiting the rate of operations such as API requests, by ensuring that calls to `throttle()` are spaced out by at least a given interval based on the desired requests per minute. If no rate is specified (i.e., `requests_per_minute` is None), rate limiting is disabled and `throttle()` becomes a no-op. Args: requests_per_minute (`float | None`): Maximum number of allowed requests per minute. Use `None` to disable rate limiting. """ def __init__(self, requests_per_minute: float | None = None): self._enabled = requests_per_minute is not None self._interval = 60.0 / requests_per_minute if self._enabled else 0.0 self._last_call = 0.0 def throttle(self): """Pause execution to respect the rate limit, if enabled.""" if not self._enabled: return now = time.time() elapsed = now - self._last_call if elapsed < self._interval: time.sleep(self._interval - elapsed) self._last_call = time.time()
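# --- Illustrative usage of RateLimiter (not part of the original file) -------
# Values are arbitrary; 120 requests per minute enforces >= 0.5 s between calls.
if __name__ == "__main__":
    limiter = RateLimiter(requests_per_minute=120)
    for _ in range(3):
        limiter.throttle()  # sleeps just enough to respect the configured interval
        ...                 # e.g. issue a rate-limited API request here
    RateLimiter(None).throttle()  # with no rate configured, throttle() is a no-op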
smolagents/src/smolagents/utils.py/0
{ "file_path": "smolagents/src/smolagents/utils.py", "repo_id": "smolagents", "token_count": 6942 }
249
import json from textwrap import dedent import pytest from mcp import StdioServerParameters from smolagents.mcp_client import MCPClient @pytest.fixture def echo_server_script(): return dedent( ''' from mcp.server.fastmcp import FastMCP mcp = FastMCP("Echo Server") @mcp.tool() def echo_tool(text: str) -> str: """Echo the input text""" return f"Echo: {text}" mcp.run() ''' ) @pytest.fixture def structured_output_server_script(): return dedent( ''' from mcp.server.fastmcp import FastMCP from typing import Any mcp = FastMCP("Structured Output Server") @mcp.tool() def user_info_tool(name: str) -> dict[str, Any]: """Get user information as structured data""" user_data = { "name": name, "age": 25, "email": f"{name.lower()}@example.com", "active": True } return user_data mcp.run() ''' ) # Ignore FutureWarning about structured_output default value change: this test intentionally uses default behavior @pytest.mark.filterwarnings("ignore:.*structured_output:FutureWarning") def test_mcp_client_with_syntax(echo_server_script: str): """Test the MCPClient with the context manager syntax.""" server_parameters = StdioServerParameters(command="python", args=["-c", echo_server_script]) with MCPClient(server_parameters) as tools: assert len(tools) == 1 assert tools[0].name == "echo_tool" assert tools[0].forward(**{"text": "Hello, world!"}) == "Echo: Hello, world!" def test_mcp_client_with_structured_output(structured_output_server_script: str): """Test the MCPClient with structured_output=True parameter.""" server_parameters = StdioServerParameters(command="python", args=["-c", structured_output_server_script]) with MCPClient(server_parameters, structured_output=True) as tools: assert len(tools) == 1 assert tools[0].name == "user_info_tool" assert tools[0].output_type == "object" # Should be object due to outputSchema # Check the output schema {'additionalProperties': True, 'title': 'user_info_toolDictOutput', 'type': 'object'} assert tools[0].output_schema is not None schema = tools[0].output_schema assert isinstance(schema, dict) assert schema.get("type") == "object" # Test that structured output is properly parsed result = tools[0].forward(**{"name": "Alice"}) assert isinstance(result, dict) assert result["name"] == "Alice" assert result["age"] == 25 assert result["email"] == "alice@example.com" assert result["active"] is True def test_mcp_client_without_structured_output(structured_output_server_script: str): """Test the MCPClient with structured_output=False (default) for comparison.""" server_parameters = StdioServerParameters(command="python", args=["-c", structured_output_server_script]) with MCPClient(server_parameters, structured_output=False) as tools: assert len(tools) == 1 assert tools[0].name == "user_info_tool" assert tools[0].output_type == "object" # Test that output is returned as raw text result = tools[0].forward(**{"name": "Alice"}) assert isinstance(result, str) # Should be JSON string, not parsed object parsed_result = json.loads(result) assert parsed_result["name"] == "Alice" # Ignore FutureWarning about structured_output default value change: this test intentionally uses default behavior @pytest.mark.filterwarnings("ignore:.*structured_output:FutureWarning") def test_mcp_client_try_finally_syntax(echo_server_script: str): """Test the MCPClient with the try ... 
finally syntax.""" server_parameters = StdioServerParameters(command="python", args=["-c", echo_server_script]) mcp_client = MCPClient(server_parameters) try: tools = mcp_client.get_tools() assert len(tools) == 1 assert tools[0].name == "echo_tool" assert tools[0].forward(**{"text": "Hello, world!"}) == "Echo: Hello, world!" finally: mcp_client.disconnect() # Ignore FutureWarning about structured_output default value change: this test intentionally uses default behavior @pytest.mark.filterwarnings("ignore:.*structured_output:FutureWarning") def test_multiple_servers(echo_server_script: str): """Test the MCPClient with multiple servers.""" server_parameters = [ StdioServerParameters(command="python", args=["-c", echo_server_script]), StdioServerParameters(command="python", args=["-c", echo_server_script]), ] with MCPClient(server_parameters) as tools: assert len(tools) == 2 assert tools[0].name == "echo_tool" assert tools[1].name == "echo_tool" assert tools[0].forward(**{"text": "Hello, world!"}) == "Echo: Hello, world!" assert tools[1].forward(**{"text": "Hello, world!"}) == "Echo: Hello, world!"
smolagents/tests/test_mcp_client.py/0
{ "file_path": "smolagents/tests/test_mcp_client.py", "repo_id": "smolagents", "token_count": 1956 }
250
include Makefile-flash-att include Makefile-flash-att-v2 include Makefile-vllm include Makefile-awq include Makefile-eetq include Makefile-selective-scan PROTO_PATH ?= ../proto/v3 unit-tests: pytest -s -vv -m "not private" tests gen-server: # Compile protos pip install grpcio-tools==1.62.2 mypy-protobuf==3.6.0 'types-protobuf' --no-cache-dir mkdir text_generation_server/pb || true python -m grpc_tools.protoc -I$(PROTO_PATH) --python_out=text_generation_server/pb \ --grpc_python_out=text_generation_server/pb --mypy_out=text_generation_server/pb $(PROTO_PATH)/generate.proto find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \; touch text_generation_server/pb/__init__.py install: gen-server pip install pip --upgrade pip install --no-deps -r requirements.txt pip install -e "." run-dev: SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation_server/cli.py serve bigscience/bloom-560m --sharded install-poetry: curl -sSL https://install.python-poetry.org | python3 - update-lock: rm poetry.lock poetry lock --no-update export-requirements: poetry export -o requirements.txt --without-hashes
text-generation-inference/backends/gaudi/server/Makefile/0
{ "file_path": "text-generation-inference/backends/gaudi/server/Makefile", "repo_id": "text-generation-inference", "token_count": 468 }
251
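The `gen-server` target in the Makefile above is mostly protoc plumbing, but the `sed` invocation is easy to miss: it rewrites the absolute imports in the generated gRPC stubs into package-relative ones so they resolve from inside `text_generation_server/pb`. A small sketch of the effect (the example import line is illustrative, not taken from the generated files):

```python
# Illustrative only: the effect of the sed rewrite in the `gen-server` target above.
# grpc_tools.protoc emits absolute imports such as:
#     import generate_pb2 as generate__pb2
# which the Makefile rewrites into package-relative imports:
#     from . import generate_pb2 as generate__pb2
import re

def relativize_pb2_imports(source: str) -> str:
    """Rewrite top-level pb2 imports into relative ones, mirroring the sed expression above."""
    return re.sub(r"^(import.*pb2)", r"from . \1", source, flags=re.MULTILINE)

print(relativize_pb2_imports("import generate_pb2 as generate__pb2"))
# -> from . import generate_pb2 as generate__pb2
```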
# coding=utf-8 # Copyright 2023, 2024 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Tuple, Optional import torch def grouped_topk( hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, num_expert_group: int = 0, topk_group: int = 0, scoring_func: str = "softmax", e_score_correction_bias: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: assert hidden_states.shape[0] == gating_output.shape[0], "Number of tokens mismatch" gating_output = gating_output.float() if e_score_correction_bias is not None: e_score_correction_bias = e_score_correction_bias.float() if scoring_func == "softmax": scores = torch.softmax(gating_output, dim=-1) elif scoring_func == "sigmoid": scores = gating_output.sigmoid() else: raise ValueError(f"Unsupported scoring function: {scoring_func}") num_token = scores.shape[0] if e_score_correction_bias is not None: # Store original scores before applying correction bias. We use biased # scores for expert selection but original scores for routing weights original_scores = scores scores = scores + e_score_correction_bias.unsqueeze(0) group_scores = ( scores.view(num_token, num_expert_group, -1).topk(2, dim=-1)[0].sum(dim=-1) ) else: group_scores = ( scores.view(num_token, num_expert_group, -1).max(dim=-1).values ) # [n, n_group] group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[ 1 ] # [n, top_k_group] group_mask = torch.zeros_like(group_scores) # [n, n_group] group_mask.scatter_(1, group_idx, 1) # [n, n_group] score_mask = ( group_mask.unsqueeze(-1) .expand(num_token, num_expert_group, scores.shape[-1] // num_expert_group) .reshape(num_token, -1) ) # [n, e] tmp_scores = scores.masked_fill(~score_mask.bool(), float("-inf")) # [n, e] if e_score_correction_bias is not None: topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False)[1] # Use original unbiased scores for the routing weights topk_weights = original_scores.gather(1, topk_ids) else: topk_weights, topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False) if renormalize: topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights.to(torch.float32), topk_ids.to(torch.int32) def fused_topk( hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, ) -> Tuple[torch.Tensor, torch.Tensor]: topk_weights = torch.nn.functional.softmax( gating_output, dim=1, dtype=torch.float32 ) topk_weights, topk_ids = torch.topk(topk_weights, topk, dim=-1) if renormalize: topk_weights /= topk_weights.sum(dim=-1, keepdim=True) return topk_weights, topk_ids def select_experts( hidden_states: torch.Tensor, router_logits: torch.Tensor, top_k: int, use_grouped_topk: bool, renormalize: bool, topk_group: Optional[int] = None, num_expert_group: Optional[int] = None, scoring_func: str = "softmax", e_score_correction_bias: Optional[torch.Tensor] = None, ): # DeekSeekv2 uses grouped_top_k if use_grouped_topk: assert topk_group is not None 
assert num_expert_group is not None topk_weights, topk_ids = grouped_topk( hidden_states=hidden_states, gating_output=router_logits, topk=top_k, renormalize=renormalize, num_expert_group=num_expert_group, topk_group=topk_group, scoring_func=scoring_func, e_score_correction_bias=e_score_correction_bias, ) else: topk_weights, topk_ids = fused_topk( hidden_states=hidden_states, gating_output=router_logits, topk=top_k, renormalize=renormalize, ) return topk_weights, topk_ids
text-generation-inference/backends/gaudi/server/text_generation_server/layers/moe/fused_moe.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/moe/fused_moe.py", "repo_id": "text-generation-inference", "token_count": 2009 }
252
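The routing helpers above implement DeepSeek-style grouped expert selection. A rough usage sketch follows; tensor sizes are made up, and the import path assumes the Gaudi server package layout shown in the file path:

```python
# Hypothetical call into the grouped top-k routing defined above; shapes are illustrative.
import torch
from text_generation_server.layers.moe.fused_moe import select_experts

num_tokens, hidden_size, num_experts = 4, 16, 8
hidden_states = torch.randn(num_tokens, hidden_size)
router_logits = torch.randn(num_tokens, num_experts)

# Experts are split into `num_expert_group` groups; the best `topk_group` groups are
# kept, then `top_k` experts are chosen inside that subset and optionally renormalized.
topk_weights, topk_ids = select_experts(
    hidden_states=hidden_states,
    router_logits=router_logits,
    top_k=2,
    use_grouped_topk=True,
    renormalize=True,
    topk_group=2,
    num_expert_group=4,
)
print(topk_weights.shape, topk_ids.shape)  # torch.Size([4, 2]) torch.Size([4, 2])
```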
import asyncio from pathlib import Path from typing import List from grpc import aio from grpc_reflection.v1alpha import reflection from loguru import logger from .generator import Generator, NeuronGenerator from .interceptor import ExceptionInterceptor from .pb import generate_pb2, generate_pb2_grpc class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer): def __init__(self, generator: Generator, server_urls: List[str]): self.generator = generator self.server_urls = server_urls async def Info(self, request, context): return self.generator.info async def Health(self, request, context): return generate_pb2.HealthResponse() async def ServiceDiscovery(self, request, context): return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls) async def ClearCache(self, request, context): if request.HasField("id"): self.generator.clear(request.id) else: self.generator.clear() return generate_pb2.ClearCacheResponse() async def FilterBatch(self, request, context): filtered_batch = self.generator.filter(request.batch_id, request.request_ids) return generate_pb2.FilterBatchResponse(batch=filtered_batch) async def Warmup(self, request, context): max_tokens = self.generator.warmup(request.batch) return generate_pb2.WarmupResponse(max_supported_total_tokens=max_tokens) async def Prefill(self, request, context): generations, batch = self.generator.prefill(request.batch) return generate_pb2.PrefillResponse(generations=generations, batch=batch) async def Decode(self, request, context): generations, batch = self.generator.decode(request.batches) return generate_pb2.DecodeResponse(generations=generations, batch=batch) def serve( model_id: str, revision: str, uds_path: Path, ): async def serve_inner(model_id: str, revision: str): unix_socket_template = "unix://{}-{}" local_url = unix_socket_template.format(uds_path, 0) server_urls = [local_url] try: generator = NeuronGenerator.from_pretrained(model_id, revision) except Exception: logger.exception("Error when initializing model") raise server = aio.server(interceptors=[ExceptionInterceptor()]) generate_pb2_grpc.add_TextGenerationServiceServicer_to_server( TextGenerationService(generator, server_urls), server ) SERVICE_NAMES = ( generate_pb2.DESCRIPTOR.services_by_name["TextGenerationService"].full_name, reflection.SERVICE_NAME, ) reflection.enable_server_reflection(SERVICE_NAMES, server) server.add_insecure_port(local_url) await server.start() logger.info("Server started at {}".format(local_url)) try: await server.wait_for_termination() except KeyboardInterrupt: logger.info("Signal received. Shutting down") await server.stop(0) asyncio.run(serve_inner(model_id, revision))
text-generation-inference/backends/neuron/server/text_generation_server/server.py/0
{ "file_path": "text-generation-inference/backends/neuron/server/text_generation_server/server.py", "repo_id": "text-generation-inference", "token_count": 1222 }
253
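For reference, a client of the servicer above would dial the same unix socket that `serve` binds. A minimal async sketch; the request message names (`HealthRequest`, `InfoRequest`) and the default socket path are assumptions based on the generated `generate_pb2` stubs rather than something shown in this file:

```python
# Hypothetical client for the gRPC servicer above, over the unix socket bound by serve().
import asyncio
import grpc

from text_generation_server.pb import generate_pb2, generate_pb2_grpc

async def check_server(uds_path: str = "/tmp/text-generation-server") -> None:
    # serve() formats the address as "unix://{uds_path}-{shard}" with shard 0.
    async with grpc.aio.insecure_channel(f"unix://{uds_path}-0") as channel:
        stub = generate_pb2_grpc.TextGenerationServiceStub(channel)
        await stub.Health(generate_pb2.HealthRequest())  # raises if the generator is not ready
        info = await stub.Info(generate_pb2.InfoRequest())
        print(info)

asyncio.run(check_server())
```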
#!/usr/bin/env python  import logging import os import sys  from text_generation_server.tgi_env import (     available_cores,     get_env_dict,     get_neuron_config_for_model,     neuron_config_to_env,     neuronxcc_version,     parse_cmdline_and_set_env,     tgi_env_vars, )  logger = logging.getLogger(__name__)   def main():     """     This script determines proper default TGI env variables for the neuron precompiled models to work properly     :return:     """     args = parse_cmdline_and_set_env()      for env_var in tgi_env_vars:         if not os.getenv(env_var):             break     else:         logger.info(             "All env vars %s already set, skipping, user knows what they are doing",             tgi_env_vars,         )         sys.exit(0)      neuron_config = get_neuron_config_for_model(args.model_id, args.revision)      if not neuron_config:         msg = (             "No compatible neuron config found. Provided env {}, available cores {}, neuronxcc version {}"         ).format(get_env_dict(), available_cores, neuronxcc_version)         logger.error(msg)         raise Exception(msg)      neuron_config_to_env(neuron_config)   if __name__ == "__main__":     main()
text-generation-inference/backends/neuron/tgi_entry_point.py/0
{ "file_path": "text-generation-inference/backends/neuron/tgi_entry_point.py", "repo_id": "text-generation-inference", "token_count": 515 }
254
pub use looper::TensorRtLlmBackendV2; pub mod errors; mod looper; mod utils; #[cxx::bridge(namespace = "huggingface::tgi::backends::trtllm")] mod ffi { #[cxx_name = "finish_reason_t"] #[derive(Debug, Clone, Copy)] pub enum FinishReason { /// The request is not finished. #[cxx_name = "kNOT_FINISHED"] NotFinished = 0u8, /// The request finished because the end id was generated. #[cxx_name = "kEND_ID"] EndTokenId = 1u8, /// The request finished because a stop word was generated. #[cxx_name = "kSTOP_WORDS"] StopWords = 2u8, /// The request finished because the maximum number of tokens was reached. #[cxx_name = "kLENGTH"] MaxLength = 3u8, } /// Struct used as shared type between rust and C++ to represent the result /// of a single decoding iteration #[cxx_name = "generation_step_t"] #[derive(Debug, Clone)] pub struct GenerationStep { request_id: u64, token_id: u32, log_prob: f32, is_final: bool, finish_reason: FinishReason, has_error: bool, error_msg: String, } unsafe extern "C++" { include!("backends/trtllm/csrc/ffi.hpp"); /// Represent an instance of the underlying TensorRT-LLM backend #[cxx_name = "tensorrt_llm_backend_t"] type TensorRtLlmBackendImpl; /// Create an instance backed behind a std::unique_ptr to manage the lifespan of the backend /// /// # Arguments /// /// * `engine_folder`: Path to the folder containing all the TRTLLM engines /// * `executor_worker`: Path to the TRTLLM executor worker /// /// returns: <unknown> /// /// # Examples /// /// ``` /// /// ``` fn create_backend_from_engine_folder( engine_folder: &str, executor_worker: &str, ) -> Result<UniquePtr<TensorRtLlmBackendImpl>>; fn num_tokens_ready(self: &TensorRtLlmBackendImpl) -> usize; fn submit( self: Pin<&mut TensorRtLlmBackendImpl>, tokens: &[u32], max_new_tokens: u32, top_k: u32, top_p: f32, temperature: f32, repetition_penalty: f32, frequency_penalty: f32, seed: u64, ) -> Result<u64>; fn pull_tokens( self: Pin<&mut TensorRtLlmBackendImpl>, ) -> Result<UniquePtr<CxxVector<GenerationStep>>>; fn cancel(self: Pin<&mut TensorRtLlmBackendImpl>, request_id: u64); } } use ffi::FinishReason; use text_generation_router::FinishReason as InferFinishReason; impl From<FinishReason> for InferFinishReason { fn from(reason: FinishReason) -> Self { match reason { FinishReason::StopWords => InferFinishReason::StopSequence, FinishReason::MaxLength => InferFinishReason::Length, FinishReason::EndTokenId => InferFinishReason::EndOfSequenceToken, _ => panic!("Cannot convert {reason:?} to text_generation_router::FinishReason"), } } }
text-generation-inference/backends/trtllm/src/lib.rs/0
{ "file_path": "text-generation-inference/backends/trtllm/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 1463 }
255
use std::sync::Arc; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use rand::Rng; use text_generation_router_v3::block_allocator::Allocator; use text_generation_router_v3::radix::RadixAllocator; fn prefix_cache_benchmark(c: &mut Criterion) { // let prefixes: Vec<Vec<u32>> = (0..8192) // .chunks(256) // .into_iter() // .map(|c| c.collect()) // .collect(); let mut cache = RadixAllocator::new(1, 262144, None); c.bench_function("Radix allocator", |b| { b.iter_batched( || { //prefixes // .choose_multiple(&mut rand::thread_rng(), 5) // .fold(Vec::new(), |mut v, s| { // v.extend(s); // v // }) (0..7936) .map(|_| rand::thread_rng().gen_range(0..1024)) .collect::<Vec<u32>>() }, |prefill| { let alloc = cache.allocate( prefill.len() as u32 + 13, Some(Arc::new(black_box(prefill))), ); if let Some(alloc) = alloc { cache.free(alloc.blocks.clone(), alloc.allocation_id); } }, criterion::BatchSize::SmallInput, ); }); } criterion_group!(benches, prefix_cache_benchmark); criterion_main!(benches);
text-generation-inference/backends/v3/benches/prefix_cache.rs/0
{ "file_path": "text-generation-inference/backends/v3/benches/prefix_cache.rs", "repo_id": "text-generation-inference", "token_count": 806 }
256
mod app; mod event; mod generation; mod table; mod utils; use crate::app::App; use crate::event::Event; use ratatui::backend::CrosstermBackend; use ratatui::crossterm::ExecutableCommand; use ratatui::Terminal; use std::io; use text_generation_client::v3::{GrammarType, NextTokenChooserParameters, ShardedClient}; use tokenizers::Tokenizer; use tokio::sync::{broadcast, mpsc}; /// Run benchmarking app #[allow(clippy::too_many_arguments)] pub async fn run( tokenizer_name: String, tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, frequency_penalty: Option<f32>, watermark: bool, do_sample: bool, client: ShardedClient, ) -> Result<(), std::io::Error> { let parameters = NextTokenChooserParameters { temperature: temperature.unwrap_or(1.0), top_k: top_k.unwrap_or(0), top_p: top_p.unwrap_or(1.0), typical_p: typical_p.unwrap_or(1.0), do_sample, seed: 0, repetition_penalty: repetition_penalty.unwrap_or(1.0), frequency_penalty: frequency_penalty.unwrap_or(0.0), watermark, grammar: String::new(), grammar_type: GrammarType::None as i32, }; // Initialize terminal properties ratatui::crossterm::terminal::enable_raw_mode()?; io::stdout().execute(ratatui::crossterm::terminal::EnterAlternateScreen)?; io::stdout().execute(ratatui::crossterm::cursor::Hide)?; // Initialize terminal let mut terminal = { let backend = CrosstermBackend::new(io::stdout()); Terminal::new(backend)? }; // Create message channel between generation_task and app let (run_sender, run_receiver) = mpsc::channel(8); // Crossterm event channel let (event_sender, mut event_receiver) = mpsc::channel(8); // Shutdown channel to terminate tasks let (shutdown_sender, _) = broadcast::channel(1); // Channel to check if tasks terminated let (shutdown_guard_sender, mut shutdown_guard_receiver) = mpsc::channel(1); // Create generation task tokio::spawn(generation::generation_task( tokenizer, batch_size.clone(), sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Create event task tokio::spawn(event::terminal_event_task( 250, event_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Drop our end of shutdown sender drop(shutdown_guard_sender); // Create App let mut app = App::new( run_receiver, tokenizer_name.clone(), sequence_length, decode_length, n_runs, batch_size, ); while app.running { // Draw frame terminal.draw(|frame| app.render(frame))?; // Await a new event from event handling task match event_receiver.recv().await { None => break, // Update app state Some(event) => match event { Event::Tick => app.tick(), Event::Key(key_event) => app.handle_key_event(key_event), _ => {} }, } } // Ask tasks to shutdown let _ = shutdown_sender.send(()); // Wait for tasks to shutdown let _ = shutdown_guard_receiver.recv().await; // Revert terminal to original view io::stdout().execute(ratatui::crossterm::terminal::LeaveAlternateScreen)?; ratatui::crossterm::terminal::disable_raw_mode()?; io::stdout().execute(ratatui::crossterm::cursor::Show)?; let parameters_table = table::parameters_table( tokenizer_name, sequence_length, decode_length, top_n_tokens, n_runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, frequency_penalty, watermark, do_sample, ); println!("\n{parameters_table}\n"); let latency_table = 
table::latency_table(&app.data); println!("\n{latency_table}\n"); let throughput_table = table::throughput_table(&app.data); println!("\n{throughput_table}\n"); Ok(()) }
text-generation-inference/benchmark/src/lib.rs/0
{ "file_path": "text-generation-inference/benchmark/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 1979 }
257
from typing import Dict # Text Generation Inference Errors class ValidationError(Exception): def __init__(self, message: str): super().__init__(message) class GenerationError(Exception): def __init__(self, message: str): super().__init__(message) class OverloadedError(Exception): def __init__(self, message: str): super().__init__(message) class IncompleteGenerationError(Exception): def __init__(self, message: str): super().__init__(message) # API Inference Errors class BadRequestError(Exception): def __init__(self, message: str): super().__init__(message) class ShardNotReadyError(Exception): def __init__(self, message: str): super().__init__(message) class ShardTimeoutError(Exception): def __init__(self, message: str): super().__init__(message) class NotFoundError(Exception): def __init__(self, message: str): super().__init__(message) class RateLimitExceededError(Exception): def __init__(self, message: str): super().__init__(message) class NotSupportedError(Exception): def __init__(self, model_id: str): message = ( f"Model `{model_id}` is not available for inference with this client. \n" "Use `huggingface_hub.inference_api.InferenceApi` instead." ) super(NotSupportedError, self).__init__(message) # Unknown error class UnknownError(Exception): def __init__(self, message: str): super().__init__(message) def parse_error(status_code: int, payload: Dict[str, str]) -> Exception: """ Parse error given an HTTP status code and a json payload Args: status_code (`int`): HTTP status code payload (`Dict[str, str]`): Json payload Returns: Exception: parsed exception """ # Try to parse a Text Generation Inference error message = payload["error"] if "error_type" in payload: error_type = payload["error_type"] if error_type == "generation": return GenerationError(message) if error_type == "incomplete_generation": return IncompleteGenerationError(message) if error_type == "overloaded": return OverloadedError(message) if error_type == "validation": return ValidationError(message) # Try to parse a APIInference error if status_code == 400: return BadRequestError(message) if status_code == 403 or status_code == 424: return ShardNotReadyError(message) if status_code == 504: return ShardTimeoutError(message) if status_code == 404: return NotFoundError(message) if status_code == 429: return RateLimitExceededError(message) # Fallback to an unknown error return UnknownError(message)
text-generation-inference/clients/python/text_generation/errors.py/0
{ "file_path": "text-generation-inference/clients/python/text_generation/errors.py", "repo_id": "text-generation-inference", "token_count": 1080 }
258
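A short illustration of how `parse_error` above routes responses onto the exception classes it defines (the payloads are made up; the import assumes the client package is installed as `text_generation`):

```python
# Illustrative mapping from HTTP status codes / payloads to the exceptions defined above.
from text_generation.errors import (
    GenerationError,
    RateLimitExceededError,
    UnknownError,
    parse_error,
)

# A TGI payload with an "error_type" takes priority over the status code.
exc = parse_error(500, {"error": "Request failed during generation", "error_type": "generation"})
assert isinstance(exc, GenerationError)

# Without an "error_type", the status code decides: 429 maps to RateLimitExceededError.
exc = parse_error(429, {"error": "Too many requests"})
assert isinstance(exc, RateLimitExceededError)

# Anything unrecognised falls back to UnknownError, which callers typically re-raise.
assert isinstance(parse_error(500, {"error": "something unexpected"}), UnknownError)
```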
# Non-core Model Serving  TGI supports various LLM architectures (see full list [here](../supported_models)). If you wish to serve a model that is not one of the supported models, TGI will fall back to the `transformers` implementation of that model. This means you will be unable to use some of the features introduced by TGI, such as tensor-parallel sharding or flash attention. However, you can still get many benefits of TGI, such as continuous batching or streaming outputs.  You can serve these models using the same Docker command-line invocation as with fully supported models 👇  ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id gpt2 ```  If the model you wish to serve is a custom transformers model, and its weights and implementation are available on the Hub, you can still serve the model by passing the `--trust-remote-code` flag to the `docker run` command like below 👇  ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id <CUSTOM_MODEL_ID> --trust-remote-code ```  Finally, if the model is not on the Hugging Face Hub but available locally, you can pass the path to the folder that contains your model like below 👇  ```bash # Make sure your model is in the $volume directory docker run --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/<PATH-TO-FOLDER> ```  You can refer to [transformers docs on custom models](https://huggingface.co/docs/transformers/main/en/custom_models) for more information.
text-generation-inference/docs/source/basic_tutorials/non_core_models.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/non_core_models.md", "repo_id": "text-generation-inference", "token_count": 472 }
259
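Once one of the containers above is running, querying a non-core model works the same way as for a fully supported one. A small sketch with `huggingface_hub` (the port matches the `-p 8080:80` mapping used above; the prompt is arbitrary):

```python
# Query a TGI container started with one of the docker commands above.
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="http://127.0.0.1:8080")

# Non-core models still get continuous batching and streaming, even without flash attention.
print(client.text_generation("What is Deep Learning?", max_new_tokens=20))

for token in client.text_generation("What is Deep Learning?", max_new_tokens=20, stream=True):
    print(token, end="")
```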
# Streaming  ## What is Streaming?  Token streaming is the mode in which the server returns the tokens one by one as the model generates them. This enables showing progressive generations to the user rather than waiting for the whole generation. Streaming is an essential aspect of the end-user experience as it reduces latency, one of the most critical aspects of a smooth experience.  <div class="flex justify-center">     <img         class="block dark:hidden"         src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual_360.gif"     />     <img         class="hidden dark:block"         src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual-dark_360.gif"     /> </div>  With token streaming, the server can start returning the tokens one by one before having to generate the whole response. Users can have a sense of the generation's quality before the end of the generation. This has different positive effects:  * Users can get results orders of magnitude earlier for extremely long queries. * Seeing something in progress allows users to stop the generation if it's not going in the direction they expect. * Perceived latency is lower when results are shown in the early stages. * When used in conversational UIs, the experience feels more natural.  For example, a system can generate 100 tokens per second. If the system generates 1000 tokens, with the non-streaming setup, users need to wait 10 seconds to get results. On the other hand, with the streaming setup, users get initial results immediately, and although end-to-end latency will be the same, they can see half of the generation after five seconds.  Below you can see an interactive demo that shows non-streaming vs streaming side-by-side. Click **generate** below.  <div class="block dark:hidden"> 	<iframe         src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=light"         width="850"         height="350" 	></iframe> </div> <div class="hidden dark:block">     <iframe         src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=dark"         width="850"         height="350"     ></iframe> </div>  ## How to use Streaming?  ### Streaming with Python  To stream tokens with `InferenceClient`, simply pass `stream=True` and iterate over the response.  ```python from huggingface_hub import InferenceClient  client = InferenceClient(base_url="http://127.0.0.1:8080") output = client.chat.completions.create(     messages=[         {"role": "system", "content": "You are a helpful assistant."},         {"role": "user", "content": "Count to 10"},     ],     stream=True,     max_tokens=1024, )  for chunk in output:     print(chunk.choices[0].delta.content)  # 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 ```  The `huggingface_hub` library also comes with an `AsyncInferenceClient` in case you need to handle the requests concurrently.  ```python import asyncio  from huggingface_hub import AsyncInferenceClient  client = AsyncInferenceClient(base_url="http://127.0.0.1:8080") async def main():     stream = await client.chat.completions.create(         messages=[{"role": "user", "content": "Say this is a test"}],         stream=True,     )     async for chunk in stream:         print(chunk.choices[0].delta.content or "", end="")  asyncio.run(main())  # This # is # a # test #. 
``` ### Streaming with cURL To use the OpenAI Chat Completions compatible Messages API `v1/chat/completions` endpoint with curl, you can add the `-N` flag, which disables curl default buffering and shows data as it arrives from the server ```curl curl localhost:8080/v1/chat/completions \ -X POST \ -d '{ "model": "tgi", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is deep learning?" } ], "stream": true, "max_tokens": 20 }' \ -H 'Content-Type: application/json' ``` ### Streaming with JavaScript First, we need to install the `@huggingface/inference` library. ```bash npm install @huggingface/inference ``` Whether you use Inference Providers (our serverless API), or Inference Endpoints, you can call `InferenceClient`. ```js import { InferenceClient } from '@huggingface/inference'; const client = new InferenceClient('hf_YOUR_TOKEN', { endpointUrl: 'https://YOUR_ENDPOINT.endpoints.huggingface.cloud' }); // prompt const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips'; const stream = client.textGenerationStream({ inputs: prompt }); for await (const r of stream) { // yield the generated token process.stdout.write(r.token.text); } ``` ## How does Streaming work under the hood? Under the hood, TGI uses Server-Sent Events (SSE). In an SSE Setup, a client sends a request with the data, opening an HTTP connection and subscribing to updates. Afterward, the server sends data to the client. There is no need for further requests; the server will keep sending the data. SSEs are unidirectional, meaning the client does not send other requests to the server. SSE sends data over HTTP, making it easy to use. SSEs are different than: * Polling: where the client keeps calling the server to get data. This means that the server might return empty responses and cause overhead. * Webhooks: where there is a bi-directional connection. The server can send information to the client, but the client can also send data to the server after the first request. Webhooks are more complex to operate as they don’t only use HTTP. If there are too many requests at the same time, TGI returns an HTTP Error with an `overloaded` error type (`huggingface_hub` returns `OverloadedError`). This allows the client to manage the overloaded server (e.g., it could display a busy error to the user or retry with a new request). To configure the maximum number of concurrent requests, you can specify `--max_concurrent_requests`, allowing clients to handle backpressure.
text-generation-inference/docs/source/conceptual/streaming.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/streaming.md", "repo_id": "text-generation-inference", "token_count": 1861 }
260
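The SSE flow described above can also be inspected without any client library. A minimal sketch with `requests`, mirroring the cURL payload shown earlier (the trailing `[DONE]` sentinel follows the OpenAI-style convention and is an assumption here):

```python
# Minimal server-sent-events consumer for the /v1/chat/completions endpoint shown above.
import json
import requests

payload = {
    "model": "tgi",
    "messages": [{"role": "user", "content": "What is deep learning?"}],
    "stream": True,
    "max_tokens": 20,
}

with requests.post("http://localhost:8080/v1/chat/completions", json=payload, stream=True) as resp:
    for raw in resp.iter_lines():
        if not raw:
            continue  # SSE events are separated by blank lines
        data = raw.decode("utf-8").removeprefix("data:").strip()
        if data == "[DONE]":
            break
        chunk = json.loads(data)
        print(chunk["choices"][0]["delta"].get("content") or "", end="")
```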
# Collection of Usage Statistics Text Generation Inference collects anonymous usage statistics to help us improve the service. The collected data is used to improve TGI and to understand what causes failures. The data is collected transparently and any sensitive information is omitted. Usage statistics are collected only when TGI is running in a Docker container. This prevents data collection when TGI is run directly on the host machine. The collected data includes startup and shutdown events, as well as a heartbeat signal sent every 15 minutes. ## What data is collected The code that collects the data is available [here](https://github.com/huggingface/text-generation-inference/blob/main/router/src/usage_stats.rs). As of release 2.1.2 this is an example of the data collected: - From the TGI configuration: ```json { "event_type": "start", "disable_grammar_support": false, "max_batch_prefill_tokens": 4096, "max_batch_size": null, "max_batch_total_tokens": null, "max_best_of": 2, "max_client_batch_size": 4, "max_concurrent_requests": 128, "max_input_tokens": 1024, "max_stop_sequences": 4, "max_top_n_tokens": 5, "max_total_tokens": 2048, "max_waiting_tokens": 20, "model_config": { "model_type": "Bloom" }, "revision": null, "tokenizer_class": "BloomTokenizerFast", "validation_workers": 2, "waiting_served_ratio": 1.2, "docker_label": "latest", "git_sha": "cfc118704880453d29bcbe4fbbd91dda501cf5fe", "nvidia_env": { "name": "NVIDIA A10G", "pci_bus_id": "00000000:00:1E.0", "driver_version": "535.183.01", "pstate": "P8", "pcie_link_gen_max": "4", "pcie_link_gen_current": "1", "temperature_gpu": "31", "utilization_gpu": "0 %", "utilization_memory": "0 %", "memory_total": "23028 MiB", "memory_free": "22515 MiB", "memory_used": "0 MiB", "reset_status_reset_required": "No", "reset_status_drain_and_reset_recommended": "No", "compute_cap": "8.6", "ecc_errors_corrected_volatile_total": "0", "mig_mode_current": "[N/A]", "power_draw_instant": "10.86 W", "power_limit": "300.00 W" }, "system_env": { "cpu_count": 16, "cpu_type": "AMD EPYC 7R32", "total_memory": 66681196544, "architecture": "x86_64", "platform": "linux-unix-x86_64" } } ``` ## How to opt-out By passing the `--usage-stats` to the text-generation-launcher you can control how much usage statistics are being collected. `--usage-stats=no-stack` will not emit the stack traces from errors and the error types, but will continue to send start and stop events `--usage-stats=off` will completely disable everything
text-generation-inference/docs/source/usage_statistics.md/0
{ "file_path": "text-generation-inference/docs/source/usage_statistics.md", "repo_id": "text-generation-inference", "token_count": 966 }
261
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 185, "logprob": -1.546875, "special": false, "text": "\n" }, { "id": 549, "logprob": -2.859375, "special": false, "text": "The" }, { "id": 1727, "logprob": -2.484375, "special": false, "text": " test" }, { "id": 3102, "logprob": -0.83203125, "special": false, "text": " request" }, { "id": 317, "logprob": -1.1484375, "special": false, "text": " is" }, { "id": 245, "logprob": -1.578125, "special": false, "text": " a" }, { "id": 3412, "logprob": -2.578125, "special": false, "text": " document" }, { "id": 344, "logprob": -1.125, "special": false, "text": " that" }, { "id": 317, "logprob": -1.6953125, "special": false, "text": " is" }, { "id": 1222, "logprob": -1.71875, "special": false, "text": " used" } ], "top_tokens": null }, "generated_text": "\nThe test request is a document that is used" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2.json", "repo_id": "text-generation-inference", "token_count": 858 }
262
{ "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "Here's a description of what's shown in the image:\n\nThe image depicts a brown cow standing on a sandy beach. The beach has turquoise water and a distant island visible in the background. The sky is bright blue with some white clouds. \n\nIt's a quite a humorous and unusual scene – a cow enjoying a beach day!", "name": null, "role": "assistant", "tool_calls": null }, "usage": null } ], "created": 1747216083, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion", "system_fingerprint": "3.3.4-dev0-native", "usage": { "completion_tokens": 72, "prompt_tokens": 275, "total_tokens": 347 } }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma3/test_flash_gemma3_image_cow.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma3/test_flash_gemma3_image_cow.json", "repo_id": "text-generation-inference", "token_count": 334 }
263
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 13, "logprob": -1.9980469, "special": false, "text": "." }, { "id": 578, "logprob": -0.15795898, "special": false, "text": " The" }, { "id": 3622, "logprob": -1.0458984, "special": false, "text": " server" }, { "id": 31680, "logprob": -1.3623047, "special": false, "text": " responds" }, { "id": 449, "logprob": 0.0, "special": false, "text": " with" }, { "id": 264, "logprob": 0.0, "special": false, "text": " a" }, { "id": 330, "logprob": -0.5678711, "special": false, "text": " \"" }, { "id": 1049, "logprob": -0.12322998, "special": false, "text": "200" }, { "id": 10619, "logprob": 0.0, "special": false, "text": " OK" }, { "id": 1, "logprob": 0.0, "special": false, "text": "\"" } ], "top_tokens": null }, "generated_text": "Test request. The server responds with a \"200 OK\"" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_exl2/test_flash_llama_exl2_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_exl2/test_flash_llama_exl2_all_params.json", "repo_id": "text-generation-inference", "token_count": 856 }
264
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 311, "logprob": -1.4277344, "special": false, "text": " to" }, { "id": 279, "logprob": -0.65478516, "special": false, "text": " the" }, { "id": 2473, "logprob": -1.8300781, "special": false, "text": " service" }, { "id": 382, "logprob": -0.75, "special": false, "text": ".\n\n" }, { "id": 286, "logprob": -0.11621094, "special": false, "text": " " }, { "id": 549, "logprob": 0.0, "special": false, "text": " :" }, { "id": 689, "logprob": -0.48608398, "special": false, "text": "return" }, { "id": 25, "logprob": 0.0, "special": false, "text": ":" }, { "id": 5949, "logprob": -0.5756836, "special": false, "text": " Response" }, { "id": 504, "logprob": -0.24499512, "special": false, "text": " from" } ], "top_tokens": null }, "generated_text": "Test request to the service.\n\n :return: Response from" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2_all_params.json", "repo_id": "text-generation-inference", "token_count": 876 }
265
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 288, "logprob": -0.2854004, "special": false, "text": "ing" }, { "id": 264, "logprob": -0.38061523, "special": false, "text": " a" }, { "id": 633, "logprob": -0.09301758, "special": false, "text": " new" }, { "id": 4480, "logprob": -0.26782227, "special": false, "text": " feature" }, { "id": 297, "logprob": -0.8510742, "special": false, "text": " in" }, { "id": 272, "logprob": -0.13464355, "special": false, "text": " the" }, { "id": 2039, "logprob": 0.0, "special": false, "text": " game" }, { "id": 28723, "logprob": -0.89990234, "special": false, "text": "." }, { "id": 13, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.10632324, "special": false, "text": "\n" } ], "top_tokens": null }, "generated_text": "Test requesting a new feature in the game.\n\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json", "repo_id": "text-generation-inference", "token_count": 860 }
266
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 2502, "logprob": null, "text": " red" }, { "id": 13, "logprob": -2.734375, "text": "," }, { "id": 8862, "logprob": -3.6875, "text": " yellow" }, { "id": 13, "logprob": -0.40234375, "text": "," }, { "id": 209, "logprob": -8.25, "text": " " } ], "seed": 0, "tokens": [ { "id": 187, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 395, "logprob": -0.3125, "special": false, "text": "and" }, { "id": 4797, "logprob": 0.0, "special": false, "text": " blue" }, { "id": 9830, "logprob": -2.25, "special": false, "text": " colors" }, { "id": 15, "logprob": 0.0, "special": false, "text": "." }, { "id": 329, "logprob": -2.296875, "special": false, "text": " A" }, { "id": 1180, "logprob": -2.046875, "special": false, "text": " number" }, { "id": 273, "logprob": 0.0, "special": false, "text": " of" }, { "id": 253, "logprob": -0.86328125, "special": false, "text": " the" }, { "id": 3295, "logprob": -0.55078125, "special": false, "text": " color" } ], "top_tokens": null }, "generated_text": "blue, red, yellow, \nand blue colors. A number of the color" }
text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_all_params.json", "repo_id": "text-generation-inference", "token_count": 1155 }
267
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 8, "prefill": [], "seed": null, "tokens": [ { "id": 330, "logprob": -0.107421875, "special": false, "text": " A" }, { "id": 11426, "logprob": -0.30078125, "special": false, "text": " bee" }, { "id": 335, "logprob": -0.9609375, "special": false, "text": " on" }, { "id": 253, "logprob": -0.0703125, "special": false, "text": " a" }, { "id": 11986, "logprob": -0.5, "special": false, "text": " pink" }, { "id": 8525, "logprob": -0.09716797, "special": false, "text": " flower" }, { "id": 30, "logprob": -1.078125, "special": false, "text": "." }, { "id": 49154, "logprob": -0.110839844, "special": true, "text": "<end_of_utterance>" } ], "top_tokens": null }, "generated_text": " A bee on a pink flower." }
text-generation-inference/integration-tests/models/__snapshots__/test_smolvlm/test_flash_smolvlm_next_simple_url.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_smolvlm/test_flash_smolvlm_next_simple_url.json", "repo_id": "text-generation-inference", "token_count": 718 }
268
import pytest @pytest.fixture(scope="module") def compressed_tensors_wna16_int_24_handle(launcher): with launcher( "danieldk/Llama-3.1-8B-w4a16-int-24", num_shard=2, quantize="compressed-tensors", ) as handle: yield handle @pytest.fixture(scope="module") async def compressed_tensors_wna16_int_24(compressed_tensors_wna16_int_24_handle): await compressed_tensors_wna16_int_24_handle.health(300) return compressed_tensors_wna16_int_24_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_wna16_int_24( compressed_tensors_wna16_int_24, response_snapshot ): response = await compressed_tensors_wna16_int_24.generate( "What is deep learning?", max_new_tokens=10, decoder_input_details=True, ) assert ( response.generated_text == "Deep learning is a subset of machine learning that uses" ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_wna16_int_24_all_params( compressed_tensors_wna16_int_24, response_snapshot ): response = await compressed_tensors_wna16_int_24.generate( "What is deep learning", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert ( response.generated_text == "What is deep learning?\nDeep learning (DL) is a subset of" ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_wna16_int_24_load( compressed_tensors_wna16_int_24, generate_load, response_snapshot ): responses = await generate_load( compressed_tensors_wna16_int_24, "What is deep learning?", max_new_tokens=10, n=4, ) assert ( responses[0].generated_text == "Deep learning is a subset of machine learning that uses" ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_compressed_tensors_wna16_int_24.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_compressed_tensors_wna16_int_24.py", "repo_id": "text-generation-inference", "token_count": 1080 }
269
import pytest @pytest.fixture(scope="module") def flash_llama_gptq_handle(launcher): with launcher( "astronomer/Llama-3-8B-Instruct-GPTQ-4-Bit", num_shard=2, quantize="gptq" ) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_gptq(flash_llama_gptq_handle): await flash_llama_gptq_handle.health(300) return flash_llama_gptq_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_gptq(flash_llama_gptq, response_snapshot): response = await flash_llama_gptq.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot): response = await flash_llama_gptq.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_gptq_load( flash_llama_gptq, generate_load, response_snapshot ): responses = await generate_load( flash_llama_gptq, "Test request", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_llama_gptq.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_llama_gptq.py", "repo_id": "text-generation-inference", "token_count": 769 }
270
import pytest @pytest.fixture(scope="module") def flash_qwen2_handle(launcher): with launcher("Qwen/Qwen1.5-0.5B") as handle: yield handle @pytest.fixture(scope="module") async def flash_qwen2(flash_qwen2_handle): await flash_qwen2_handle.health(300) return flash_qwen2_handle.client @pytest.mark.release @pytest.mark.asyncio async def test_flash_qwen2(flash_qwen2, response_snapshot): response = await flash_qwen2.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response.generated_text == "\n# Create a request\nrequest = requests.get" assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_flash_qwen2_all_params(flash_qwen2, response_snapshot): response = await flash_qwen2.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_flash_qwen2_load(flash_qwen2, generate_load, response_snapshot): responses = await generate_load(flash_qwen2, "Test request", max_new_tokens=10, n=4) assert len(responses) == 4 assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"{[r.generated_text for r in responses]}" assert responses[0].generated_text == "\n# Create a request\nrequest = requests.get" assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_qwen2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_qwen2.py", "repo_id": "text-generation-inference", "token_count": 747 }
271
import pytest @pytest.fixture(scope="module") def fused_kernel_mamba_handle(launcher): with launcher("state-spaces/mamba-130m-hf", num_shard=1) as handle: yield handle @pytest.fixture(scope="module") async def fused_kernel_mamba(fused_kernel_mamba_handle): await fused_kernel_mamba_handle.health(300) return fused_kernel_mamba_handle.client @pytest.mark.release @pytest.mark.asyncio async def test_mamba(fused_kernel_mamba, response_snapshot): response = await fused_kernel_mamba.generate( "What is Deep Learning?", max_new_tokens=10 ) assert response.details.generated_tokens == 10 assert response.generated_text == "\n\nDeep learning is a new type of machine" assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_mamba_all_params(fused_kernel_mamba, response_snapshot): response = await fused_kernel_mamba.generate( "blue, red, yellow, ", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert ( response.generated_text == "blue, red, yellow, \nand blue colors. A number of the color" ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_mamba_load( fused_kernel_mamba, generate_load, generous_response_snapshot ): responses = await generate_load( fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert responses[0].generated_text == "\n\nDeep learning is a new type of machine" assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses[0].generated_text == "\n\nDeep learning is a new type of machine" assert responses == generous_response_snapshot
text-generation-inference/integration-tests/models/test_mamba.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_mamba.py", "repo_id": "text-generation-inference", "token_count": 825 }
272
[tool.poetry] name = "text-generation-inference-benchmarks" version = "0.1.0" description = "" authors = ["Hugo Larcher <hugo.larcher@huggingface.co>"] readme = "README.md" [tool.poetry.dependencies] python = "^3.11" docker = "^7.1.0" loguru = "^0.7.2" psutil = "^6.0.0" gputil = "^1.4.0" pandas = "^2.2.3" pyarrow = "^17.0.0" [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api"
text-generation-inference/load_tests/pyproject.toml/0
{ "file_path": "text-generation-inference/load_tests/pyproject.toml", "repo_id": "text-generation-inference", "token_count": 195 }
273
use crate::infer::InferError; use crate::{ FunctionDefinition, FunctionRef, FunctionsMap, JsonSchemaTool, Properties, Tool, ToolChoice, }; use serde_json::{json, Map, Value}; use std::collections::HashMap; pub(crate) struct ToolGrammar {} impl ToolGrammar { // find a tool by name fn find_tool_by_name(tools: &[Tool], name: &str) -> Result<Tool, InferError> { tools .iter() .find(|tool| tool.function.name == name) .cloned() .ok_or_else(|| InferError::ToolError(format!("Tool with name {} not found", name))) } pub fn apply( tools: Vec<Tool>, tool_choice: ToolChoice, ) -> Result<Option<(Vec<Tool>, JsonSchemaTool)>, InferError> { let tools_to_use = match tool_choice { ToolChoice::Function(function) => { vec![Self::find_tool_by_name(&tools, &function.name)?] } ToolChoice::Required => tools, ToolChoice::Auto => { // only add the no_tool function if the user has selected the auto option tools .iter() .cloned() .chain(std::iter::once(Tool { r#type: "function".to_string(), function: FunctionDefinition { name: "no_tool".to_string(), description: Some( "Open ended response with no specific tool selected".to_string(), ), arguments: json!({ "type": "object", // "properties": { // "content": { // "type": "string", // "description": "The response content", // } // }, // "required": ["content"] }), }, })) .collect::<Vec<_>>() } ToolChoice::NoTool => vec![], }; // if no tools are provided or if the user has selected the no_tool option, return None if tools_to_use.is_empty() { return Ok(None); } let functions: HashMap<String, serde_json::Value> = tools_to_use .iter() .map(|tool| { let func = tool.function.clone(); let mut params = Map::new(); params.insert( "description".to_string(), Value::String(func.description.unwrap_or_default()), ); let mut properties = Map::new(); let mut required = vec![Value::String("_name".to_string())]; properties.insert( "_name".to_string(), json!({ "type": "string", "const": func.name.clone(), }), ); if let Value::Object(args) = func.arguments { if let Some(Value::Object(props)) = args.get("properties") { properties.extend(props.clone()); } if let Some(Value::Array(reqs)) = args.get("required") { required.extend(reqs.clone()); } params.insert( "additionalProperties".to_string(), Value::Bool( args.get("additionalProperties").and_then(|v| v.as_str()) == Some("true"), ), ); } params.insert("properties".to_string(), Value::Object(properties)); params.insert("required".to_string(), Value::Array(required)); (func.name, Value::Object(params)) }) .collect(); let tool_schema = JsonSchemaTool { functions_map: FunctionsMap { functions }, properties: Properties { function: tools_to_use .iter() .map(|tool| FunctionRef { ref_path: format!("#/$functions/{}", tool.function.name.clone()), }) .collect(), }, }; Ok(Some((tools_to_use, tool_schema))) } }
text-generation-inference/router/src/infer/tool_grammar.rs/0
{ "file_path": "text-generation-inference/router/src/infer/tool_grammar.rs", "repo_id": "text-generation-inference", "token_count": 2647 }
274
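For intuition, the grammar that `ToolGrammar::apply` above builds for a single selected tool serializes to roughly the following JSON, sketched here as a Python literal. The exact top-level key names depend on how `JsonSchemaTool`, `FunctionsMap` and `Properties` serialize, so treat this as an approximation rather than the exact wire format:

```python
# Approximate shape of the JSON-schema grammar produced for one tool named "get_weather"
# whose arguments require a "city" string. Key spellings outside "$functions" / "_name" are
# assumptions about the serde serialization of the Rust structs above.
approx_tool_schema = {
    "$functions": {
        "get_weather": {
            "description": "Get the current weather for a city",
            "properties": {
                # "_name" pins the generated JSON to a specific function.
                "_name": {"type": "string", "const": "get_weather"},
                "city": {"type": "string"},
            },
            "required": ["_name", "city"],
            "additionalProperties": False,
        }
    },
    "properties": {
        "function": [{"$ref": "#/$functions/get_weather"}],
    },
}
```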
flash_att_commit := ceee0de88c037ee6eda5e75c813a8648e4bcb1c9 build-flash-attention: if [ ! -d 'flash-attention' ]; then \ pip install -U packaging ninja --no-cache-dir && \ git clone https://github.com/Narsil/flash-attention.git; \ fi cd flash-attention && git fetch && git checkout $(flash_att_commit) && \ MAX_JOBS=8 python setup.py build && cd csrc/layer_norm && python setup.py build && cd ../rotary && python setup.py build install-flash-attention: build-flash-attention cd flash-attention && git checkout $(flash_att_commit) && MAX_JOBS=8 python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
text-generation-inference/server/Makefile-flash-att/0
{ "file_path": "text-generation-inference/server/Makefile-flash-att", "repo_id": "text-generation-inference", "token_count": 236 }
275
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _q4_matmul_cuh #define _q4_matmul_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include <ATen/cuda/CUDAContext.h> #include "q4_matrix.cuh" #include "../tuning.h" void q4_matmul_cuda ( ExLlamaTuning* tuningParams, const half* x, const int x_height, const Q4Matrix* w, half* out, bool no_zero, cudaStream_t alt_stream ); void q4_matmul_recons_cuda ( ExLlamaTuning* tuningParams, const half* x, const int x_height, Q4Matrix* w, half* out, bool no_zero, const cublasHandle_t handle ); #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh", "repo_id": "text-generation-inference", "token_count": 322 }
276
#include "compat.cuh" __forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return __hadd2(result, g_result); } __forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return __half2float(__low2half(result)) + __half2float(__high2half(result)); } __forceinline__ __device__ half2 dot22_8_h2(half2(&dq)[4], const half* a_ptr) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return result; } typedef void (*fp_gemm_half_q_half_gptq_kernel) ( const half*, const uint32_t*, const uint32_t*, const half*, half*, const int, const int, const int, const int, const int, const uint16_t*, const int, const bool, const half*, const int ); template <int m_count, bool use_r_weights, bool mul_r_weights> __global__ void gemm_half_q_half_gptq_kernel ( const half* __restrict__ a, const uint32_t* __restrict__ b_q_weight, const uint32_t* __restrict__ b_gptq_qzeros, const half* __restrict__ b_gptq_scales, half* __restrict__ c, const int size_m, const int size_n, const int size_k, const int groups, const int groupsize, const uint16_t* __restrict__ b_q_perm, const int rows_4, const bool clear, const half* r_weights, const int r_weights_stride ) { MatrixView_half a_(a, size_m, size_k); MatrixView_half_rw c_(c, size_m, size_n); MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); int t = threadIdx.x; // Block int offset_n = blockIdx.x * GPTQ_BLOCK_KN_SIZE * 4; int offset_m = blockIdx.y * m_count; int offset_k = blockIdx.z * GPTQ_BLOCK_KN_SIZE; int end_n = min(offset_n + GPTQ_BLOCK_KN_SIZE * 4, size_n); int end_m = min(offset_m + m_count, size_m); int end_k = min(offset_k + GPTQ_BLOCK_KN_SIZE, size_k); int n = offset_n + t * 4; // Read weights half_uint16 weights[MAX_Q_GEMM_WEIGHTS]; if constexpr (use_r_weights) { uint16_t any_w = 0; const half* w_ptr = r_weights; for (int m = 0; m < m_count; ++m) { weights[m].as_half = *w_ptr; w_ptr += r_weights_stride; any_w |= weights[m].as_uint16; } if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!) 
} // Preload block_a __shared__ half block_a[m_count][GPTQ_BLOCK_KN_SIZE]; if (offset_k + t < end_k) { for (int m = 0; m < m_count; ++m) { const half* a_ptr = a_.item_ptr(offset_m + m, 0); half* block_a_ptr = block_a[m]; half a0; if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]]; else a0 = a_ptr[offset_k + t]; block_a_ptr[t] = a0; } } // Zero output if (n >= size_n) return; if (clear && blockIdx.z == 0) // && (threadIdx.x & 1) == 0) { for (int m = 0; m < m_count; m++) *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0; } __syncthreads(); // Find initial group int group = offset_k / groupsize; int nextgroup = offset_k + groupsize; // a, b offset int qk = offset_k / (32 / 4); const uint32_t* b_ptr = b_q_weight + qk * size_n + n; const half* a_ptr = &block_a[0][0]; int a_stride = GPTQ_BLOCK_KN_SIZE; // Initial group int zeros[4]; half2 scales[4]; half2 z1z16[4][2]; half2 y1y16[4][2]; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); // __syncthreads(); // Column result half2 block_c[m_count][4] = {}; // Dequantize and multiply int k = offset_k; while (k < end_k) { if (k == nextgroup) { group++; nextgroup += groupsize; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); } #pragma unroll for (int j = 0; j < 4; j++) { const int4* b_ptr4 = (int4*) b_ptr; int4 load_int4 = *b_ptr4; half2 dq[4][4]; dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false); dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false); dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false); dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false); #pragma unroll for (int m = 0; m < m_count; m++) { if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } block_c[m][0] = __hfma2(dot22_8_h2(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]); block_c[m][1] = __hfma2(dot22_8_h2(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]); block_c[m][2] = __hfma2(dot22_8_h2(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]); block_c[m][3] = __hfma2(dot22_8_h2(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]); } b_ptr += size_n; a_ptr += 8; } k += 32; } for (int m = 0; m < m_count; m++) { half2 *out = (half2*) c_.item_ptr(offset_m + m, n); half result0 = __hadd(__low2half(block_c[m][0]), __high2half(block_c[m][0])); half result1 = __hadd(__low2half(block_c[m][1]), __high2half(block_c[m][1])); half result2 = __hadd(__low2half(block_c[m][2]), __high2half(block_c[m][2])); half result3 = __hadd(__low2half(block_c[m][3]), __high2half(block_c[m][3])); half2 result01 = __halves2half2(result0, result1); half2 result23 = __halves2half2(result2, result3); if constexpr (mul_r_weights) { half2 w_mul2 = __half2half2(weights[m].as_half); result01 = __hmul2(result01, w_mul2); result23 = __hmul2(result23, w_mul2); } atomicAdd(out , result01); atomicAdd(out + 1, result23); } } template <bool use_r_weights, bool mul_r_weights> struct map_m_count_gptq { 
static constexpr fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(int m_count) { #if GPTQ_BLOCK_M_SIZE_MAX >= 1 if (m_count == 1) return gemm_half_q_half_gptq_kernel<1, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 2 if (m_count == 2) return gemm_half_q_half_gptq_kernel<2, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 3 if (m_count == 3) return gemm_half_q_half_gptq_kernel<3, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 4 if (m_count == 4) return gemm_half_q_half_gptq_kernel<4, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 5 if (m_count == 5) return gemm_half_q_half_gptq_kernel<5, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 6 if (m_count == 6) return gemm_half_q_half_gptq_kernel<6, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 7 if (m_count == 7) return gemm_half_q_half_gptq_kernel<7, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 8 if (m_count == 8) return gemm_half_q_half_gptq_kernel<8, use_r_weights, mul_r_weights>; #endif return NULL; } }; fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(const int m_count, bool r_weights, bool mul_r_weights) { if (!r_weights && !mul_r_weights) return map_m_count_gptq<false, false>::pick_gemm_half_q_half_gptq_kernel(m_count); if (!r_weights && mul_r_weights) return map_m_count_gptq<false, true>::pick_gemm_half_q_half_gptq_kernel(m_count); if ( r_weights && !mul_r_weights) return map_m_count_gptq< true, false>::pick_gemm_half_q_half_gptq_kernel(m_count); if ( r_weights && mul_r_weights) return map_m_count_gptq< true, true>::pick_gemm_half_q_half_gptq_kernel(m_count); return NULL; }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh", "repo_id": "text-generation-inference", "token_count": 4839 }
277
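The reference sketch below is not part of the repository: it spells out in plain PyTorch the dequantization that gemm_half_q_half_gptq_kernel performs on the fly — unpack eight 4-bit values from each int32, add one to the stored zero point (GPTQ checkpoints store zero - 1, which is why the kernel preps with (zeros[i] + 1); the 4-bit wrap applied there via & 0x0F is omitted here), subtract, and apply the per-group scale. The tensor layouts follow the usual GPTQ checkpoint convention and the function name is invented for illustration.

import torch

def dequant_gptq_4bit_reference(qweight, qzeros, scales, g_idx):
    # qweight: [K // 8, N] int32, eight 4-bit values packed per int32 along K
    # qzeros:  [groups, N // 8] int32, eight 4-bit zero points packed along N
    # scales:  [groups, N] float16
    # g_idx:   [K] int32, quantization group of every input row
    shifts = torch.arange(0, 32, 4, device=qweight.device)

    # Unpack the weights along K: [K // 8, 8, N] -> [K, N]
    w = (qweight.unsqueeze(1) >> shifts.view(1, -1, 1)) & 0xF
    w = w.reshape(-1, qweight.shape[1])

    # Unpack the zero points along N and add one (stored as zero - 1)
    z = (qzeros.unsqueeze(-1) >> shifts.view(1, 1, -1)) & 0xF
    z = z.reshape(qzeros.shape[0], -1) + 1

    # Subtract the per-group zero point and apply the per-group scale
    return (w - z[g_idx]).to(scales.dtype) * scales[g_idx]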
import pytest import torch from text_generation_server.utils.weights import ( DefaultWeightsLoader, Weights, WeightsLoader, ) from text_generation_server.layers.gptq import GPTQWeight, GPTQWeightsLoader from text_generation_server.layers.exl2 import Exl2Weight, Exl2WeightsLoader from text_generation_server.layers.marlin.marlin import ( MarlinWeight, MarlinWeightsLoader, ) from types import SimpleNamespace from typing import List, Optional, Dict, Union from pathlib import Path @pytest.fixture def gptq_weights_loader(): return GPTQWeightsLoader( bits=4, groupsize=-1, desc_act=False, quant_method="gptq", quantize="gptq", sym=True, modules_to_not_convert=[], ) @pytest.fixture def gptq_weights_loader_awq(): return GPTQWeightsLoader( bits=4, groupsize=-1, desc_act=False, quant_method="awq", quantize="awq", sym=True, modules_to_not_convert=[], ) @pytest.fixture def marlin_weights_loader(): return MarlinWeightsLoader(bits=4, is_marlin_24=False) dummy_file_system = { "test_weights": { "layer.0.weight": torch.tensor( [ [1, 2], [3, 4], ], dtype=torch.float32, ), }, "test_weights_2": { "layer.1337.weight": torch.tensor( [ [1, 2, 3, 4], [5, 6, 7, 8], ], dtype=torch.float32, ), }, "test_get_weights_col_packed": { "weight.weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), }, "test_get_multi_weights_col": { "weight.weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), }, "test_get_weights_row": { "weight.weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), }, "test_get_weights_col_gptq": { "weight.qweight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), "weight.g_idx": torch.tensor([0, 1, 0, 1], dtype=torch.int32), "weight.qzeros": torch.tensor( [ [0, 1], [1, 0], ], dtype=torch.int32, ), "weight.scales": torch.tensor( [ [100.0, 100.0], [100.0, 100.0], ], dtype=torch.float16, ), "gptq_bits": torch.tensor([8], dtype=torch.float32), "gptq_groupsize": torch.tensor([2], dtype=torch.float32), }, "test_get_weights_col_marlin": { "weight.B": torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), "weight.s": torch.tensor([[0.5000], [0.2500]], dtype=torch.float16), }, "test_get_weights_row_gptq": { "weight.qweight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.g_idx": torch.tensor([0, 1, 0, 1], dtype=torch.int32), "weight.qzeros": torch.tensor( [ [0, 1], [1, 0], ], dtype=torch.int32, ), "weight.scales": torch.tensor( [ [100.0, 100.0], [100.0, 100.0], ], dtype=torch.float16, ), "gptq_bits": torch.tensor([8], dtype=torch.float32), "gptq_groupsize": torch.tensor([2], dtype=torch.float32), }, "test_get_multi_weights_col_gptq": { "weight.qweight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.g_idx": torch.tensor([0, 1, 0, 1], dtype=torch.int32), "weight.qzeros": torch.tensor( [ [0, 1], [1, 0], ], dtype=torch.int32, ), "weight.scales": torch.tensor( [ [100.0, 100.0], [100.0, 100.0], ], dtype=torch.float16, ), "gptq_bits": torch.tensor([8], dtype=torch.float32), "gptq_groupsize": torch.tensor([2], dtype=torch.float32), }, "test_get_weights_col_packed_gptq": { "weight.qweight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.g_idx": torch.tensor([0, 1, 0, 1], dtype=torch.int32), "weight.qzeros": torch.tensor( [ [0, 1], [1, 0], ], dtype=torch.int32, ), "weight.scales": torch.tensor( [ [100.0, 100.0], [100.0, 100.0], ], dtype=torch.float16, ), "gptq_bits": torch.tensor([8], dtype=torch.float32), 
"gptq_groupsize": torch.tensor([2], dtype=torch.float32), }, "test_get_weights_col_packed_exl2": { "weight.q_weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.q_scale": torch.tensor([8], dtype=torch.int32), "weight.q_invperm": torch.tensor([1, 0, 3, 2], dtype=torch.int32), "weight.q_scale_max": torch.tensor([100], dtype=torch.float16), "weight.q_groups": torch.tensor([4], dtype=torch.int16), }, "test_get_weights_row_exl2": { "weight.q_weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.q_scale": torch.tensor([8], dtype=torch.int32), "weight.q_invperm": torch.tensor([1, 0, 3, 2], dtype=torch.int32), "weight.q_scale_max": torch.tensor([100], dtype=torch.float16), "weight.q_groups": torch.tensor([4], dtype=torch.int16), }, "test_get_multi_weights_col_exl2": { "weight.q_weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.q_scale": torch.tensor([8], dtype=torch.int32), "weight.q_invperm": torch.tensor([1, 0, 3, 2], dtype=torch.int32), "weight.q_scale_max": torch.tensor([100], dtype=torch.float16), "weight.q_groups": torch.tensor([4], dtype=torch.int16), }, "test_get_weights_col_exl2": { "weight.q_weight": torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.int32, ), "weight.q_scale": torch.tensor([8], dtype=torch.int32), "weight.q_invperm": torch.tensor([1, 0, 3, 2], dtype=torch.int32), "weight.q_scale_max": torch.tensor([100], dtype=torch.float16), "weight.q_groups": torch.tensor([4], dtype=torch.int16), }, "test_get_weights_row_marlin": { "weight.B": torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), "weight.s": torch.tensor([[0.5], [0.25]], dtype=torch.float16), }, "test_get_multi_weights_col_marlin": { "weight.B": torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), "weight.s": torch.tensor([[0.5], [0.25]], dtype=torch.float16), }, "test_get_weights_col_packed_marlin": { "weight.B": torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), "weight.s": torch.tensor([[0.5], [0.25]], dtype=torch.float16), }, } class MockSlice: def __init__(self, tensor): self.tensor = tensor def get_shape(self): return self.tensor.shape def __getitem__(self, idx): return self.tensor[idx] def mock_get_slice(tensor_name, filename): tensor = dummy_file_system[filename][tensor_name] return MockSlice(tensor) def mock_handle(filename, device, dtype): return SimpleNamespace( get_slice=lambda tensor_name: mock_get_slice(tensor_name, filename) ) class MockSafeOpen: def __init__(self, filename, framework, dummy_fs): self.filename = filename self.framework = framework self.dummy_fs = dummy_fs def keys(self): return list(self.dummy_fs[self.filename].keys()) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class MockWeights(Weights): def __init__( self, filenames: List[Union[Path, str]], device, dtype, process_group, dummy_fs, aliases: Optional[Dict[str, List[str]]] = None, prefix: Optional[str] = None, weights_loader: Optional[WeightsLoader] = None, ): routing = {} self.dummy_fs = dummy_fs for filename in filenames: with MockSafeOpen(filename, framework="pytorch", dummy_fs=dummy_fs) as f: for k in f.keys(): if k in routing: raise RuntimeError( f"Key {k} was found in multiple files: {filename} and {routing[k]}" ) routing[k] = filename if aliases is None: aliases = {} self.aliases = aliases self.routing = routing self.device = device self.dtype = dtype self.process_group = process_group self.prefix = prefix self.weights_loader = ( # We don't need to get linear layers, so 
just wrap raw tensors. DefaultWeightsLoader(lambda x: x) if weights_loader is None else weights_loader ) self._handles = {} def _get_handle(self, filename: Union[Path, str]): if filename in self._handles: return self._handles[filename] else: handle = mock_handle(filename, self.device, self.dtype) self._handles[filename] = handle return handle def get_shape(self, tensor_name: str): filename, _ = self.get_filename(tensor_name) handle = self._get_handle(filename) return handle.get_slice(tensor_name).get_shape() def get_tensor(self, tensor_name: str): filename, _ = self.get_filename(tensor_name) handle = self._get_handle(filename) return handle.get_slice(tensor_name).tensor dummy_process_group = SimpleNamespace(rank=lambda: 0, size=lambda: 1) def test_weights(): weights = MockWeights( [ "test_weights", "test_weights_2", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) assert weights.get_shape("layer.0.weight") == (2, 2) assert weights.get_tensor("layer.1337.weight").shape == (2, 4) def test_get_tensor(): weights = MockWeights( [ "test_weights", "test_weights_2", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) assert torch.allclose( weights.get_tensor("layer.0.weight"), torch.tensor( [ [1, 2], [3, 4], ], dtype=torch.float32, ), ) assert torch.allclose( weights.get_tensor("layer.1337.weight"), torch.tensor( [ [1, 2, 3, 4], [5, 6, 7, 8], ], dtype=torch.float32, ), ) def test_get_weights_col_packed(): weights = MockWeights( [ "test_get_weights_col_packed", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) prefix = "weight" block_sizes = 1 w = weights.get_weights_col_packed( prefix=prefix, block_sizes=block_sizes, ) assert torch.allclose( w, torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), ) def test_get_weights_col_packed_block_size(): weights = MockWeights( [ "test_get_weights_col_packed", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) prefix = "weight" block_sizes = 2 w = weights.get_weights_col_packed( prefix=prefix, block_sizes=block_sizes, ) assert torch.allclose( w, torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), ) def test_get_weights_col_packed_block_size_arr(): weights = MockWeights( [ "test_get_weights_col_packed", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) prefix = "weight" block_sizes = [1, 1] w = weights.get_weights_col_packed( prefix=prefix, block_sizes=block_sizes, ) assert torch.allclose( w, torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), ) def test_get_multi_weights_col(): weights = MockWeights( [ "test_get_multi_weights_col", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) prefixes = ["weight", "weight"] w = weights.get_multi_weights_col( prefixes=prefixes, dim=0, ) assert torch.allclose( w, torch.tensor( [ [1, 2], [3, 4], [5, 6], [7, 8], [1, 2], [3, 4], [5, 6], [7, 8], ], dtype=torch.float32, ), ) def test_get_weights_row(): weights = MockWeights( [ "test_get_weights_row", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, ) prefix = "weight" w = weights.get_weights_row( prefix=prefix, ) assert torch.allclose( w, torch.tensor( [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]], dtype=torch.float32, ), ) # test_get_weights_col 
def test_get_weights_col_awq(gptq_weights_loader_awq): weights = MockWeights( [ "test_get_weights_col_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader_awq, ) prefix = "weight" w = weights.get_weights_col( prefix=prefix, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor( [[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16, ), g_idx=None, bits=8.0, groupsize=2.0, use_awq_kernel=True, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert w.g_idx == expected_weight.g_idx, "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_weights_col_gtpq(gptq_weights_loader): weights = MockWeights( [ "test_get_weights_col_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader, ) prefix = "weight" w = weights.get_weights_col( prefix=prefix, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=torch.tensor([0, 1, 0, 1], dtype=torch.int32), bits=8.0, groupsize=2.0, use_awq_kernel=False, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert torch.allclose(w.g_idx, expected_weight.g_idx), "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_weights_col_exl2(): weights = MockWeights( [ "test_get_weights_col_exl2", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=Exl2WeightsLoader(), ) prefix = "weight" w = weights.get_weights_col( prefix=prefix, ) scaled_scale_max = 0.3906 * 256 expected_weight = Exl2Weight( q_weight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), q_scale=torch.tensor([8], dtype=torch.int32), q_invperm=torch.tensor([1, 0, 3, 2], dtype=torch.int16), q_scale_max=torch.tensor([scaled_scale_max], dtype=torch.float16), q_groups=torch.tensor([4], dtype=torch.int16), ) assert torch.allclose(w.q_weight, expected_weight.q_weight), "q_weight mismatch" assert torch.allclose(w.q_scale, expected_weight.q_scale), "q_scale mismatch" assert torch.allclose(w.q_invperm, expected_weight.q_invperm), "q_invperm mismatch" assert torch.allclose( w.q_scale_max, expected_weight.q_scale_max ), "q_scale_max mismatch" assert torch.allclose(w.q_groups, expected_weight.q_groups), "q_groups mismatch" def 
test_get_weights_col_marlin(marlin_weights_loader): weights = MockWeights( [ "test_get_weights_col_marlin", ], device="cpu", dtype=torch.float16, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=marlin_weights_loader, ) prefix = "weight" w = weights.get_weights_col( prefix=prefix, ) expected_weight = MarlinWeight( B=torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), s=torch.tensor([[0.5000], [0.2500]], dtype=torch.float16), ) assert torch.allclose(w.B, expected_weight.B), "B mismatch" assert torch.allclose(w.s, expected_weight.s), "s mismatch" # test_get_weights_col_packed def test_get_weights_col_packed_awq(gptq_weights_loader_awq): weights = MockWeights( [ "test_get_weights_col_packed_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader_awq, ) prefix = "weight" block_sizes = 1 w = weights.get_weights_col_packed( prefix=prefix, block_sizes=block_sizes, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=None, bits=8.0, groupsize=2.0, use_awq_kernel=True, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert w.g_idx == expected_weight.g_idx, "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" @pytest.mark.skip(reason="Review expected functionality") def test_get_weights_col_packed_exl2(): weights = MockWeights( [ "test_get_weights_col_packed_exl2", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=Exl2WeightsLoader(), ) prefix = "weight" block_sizes = 1 w = weights.get_weights_col_packed( prefix=prefix, block_sizes=block_sizes, ) scaled_scale_max = 0.3906 * 256 expected_weight = Exl2Weight( q_weight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), q_scale=torch.tensor([8], dtype=torch.int32), q_invperm=torch.tensor([1], dtype=torch.int16), q_scale_max=torch.tensor([scaled_scale_max], dtype=torch.float16), q_groups=torch.tensor([4], dtype=torch.int16), ) assert torch.allclose(w.q_weight, expected_weight.q_weight), "q_weight mismatch" assert torch.allclose(w.q_scale, expected_weight.q_scale), "q_scale mismatch" assert torch.allclose(w.q_invperm, expected_weight.q_invperm), "q_invperm mismatch" assert torch.allclose( w.q_scale_max, expected_weight.q_scale_max ), "q_scale_max mismatch" assert torch.allclose(w.q_groups, expected_weight.q_groups), "q_groups mismatch" def test_get_weights_col_packed_gptq(gptq_weights_loader): weights = MockWeights( [ "test_get_weights_col_packed_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader, ) prefixes = ["weight"] w = weights.get_multi_weights_col( prefixes=prefixes, dim=0, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), qzeros=torch.tensor([[0, 1], [1, 0]], 
dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=torch.tensor([0, 1, 0, 1], dtype=torch.int32), bits=8.0, groupsize=2.0, use_awq_kernel=False, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert torch.allclose(w.g_idx, expected_weight.g_idx), "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_weights_col_packed_marlin(marlin_weights_loader): weights = MockWeights( [ "test_get_weights_col_packed_marlin", ], device="cpu", dtype=torch.float16, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=marlin_weights_loader, ) prefix = "weight" w = weights.get_multi_weights_col( prefixes=[prefix], dim=0, ) expected_weight = MarlinWeight( B=torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), s=torch.tensor([[0.5000], [0.2500]], dtype=torch.float16), ) print(expected_weight) assert torch.allclose(w.B, expected_weight.B), "B mismatch" assert torch.allclose(w.s, expected_weight.s), "s mismatch" # test_get_multi_weights_col def test_get_multi_weights_col_awq(gptq_weights_loader_awq): weights = MockWeights( [ "test_get_multi_weights_col_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader_awq, ) prefixes = ["weight"] w = weights.get_multi_weights_col( prefixes=prefixes, dim=0, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=None, bits=8.0, groupsize=2.0, use_awq_kernel=True, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert w.g_idx == expected_weight.g_idx, "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_multi_weights_col_exl2(): weights = MockWeights( [ "test_get_multi_weights_col_exl2", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=Exl2WeightsLoader(), ) prefix = "weight" try: weights.get_multi_weights_col( prefixes=[prefix], dim=0, ) except ValueError as e: assert e.args[0] == "get_multi_weights_col is not supported for exl2" def test_get_multi_weights_col_gptq(gptq_weights_loader): weights = MockWeights( [ "test_get_multi_weights_col_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader, ) prefixes = ["weight"] w = weights.get_multi_weights_col( prefixes=prefixes, dim=0, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], 
dtype=torch.int32), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=torch.tensor([0, 1, 0, 1], dtype=torch.int32), bits=8.0, groupsize=2.0, use_awq_kernel=False, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert torch.allclose(w.g_idx, expected_weight.g_idx), "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_multi_weights_col_marlin(marlin_weights_loader): weights = MockWeights( [ "test_get_multi_weights_col_marlin", ], device="cpu", dtype=torch.float16, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=marlin_weights_loader, ) prefix = "weight" w = weights.get_multi_weights_col( prefixes=[prefix], dim=0, ) expected_weight = MarlinWeight( B=torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), s=torch.tensor([[0.5000], [0.2500]], dtype=torch.float16), ) assert torch.allclose(w.B, expected_weight.B), "B mismatch" assert torch.allclose(w.s, expected_weight.s), "s mismatch" # test_get_weights_row def test_get_weights_row_awq(gptq_weights_loader_awq): weights = MockWeights( [ "test_get_weights_row_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader_awq, ) prefix = "weight" w = weights.get_weights_row( prefix=prefix, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=None, bits=8.0, groupsize=2.0, use_awq_kernel=True, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert w.g_idx == expected_weight.g_idx, "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_weights_row_exl2(): weights = MockWeights( [ "test_get_weights_row_exl2", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=Exl2WeightsLoader(), ) prefix = "weight" w = weights.get_weights_row( prefix=prefix, ) print(w) scaled_scale_max = 0.3906 * 256 expected_weight = Exl2Weight( q_weight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), q_scale=torch.tensor([8], dtype=torch.int32), q_invperm=torch.tensor([1, 0, 3, 2], dtype=torch.int16), q_scale_max=torch.tensor([scaled_scale_max], dtype=torch.float16), q_groups=torch.tensor([4], dtype=torch.int16), ) assert torch.allclose(w.q_weight, expected_weight.q_weight), "q_weight mismatch" assert torch.allclose(w.q_scale, expected_weight.q_scale), "q_scale mismatch" assert torch.allclose(w.q_invperm, 
expected_weight.q_invperm), "q_invperm mismatch" assert torch.allclose( w.q_scale_max, expected_weight.q_scale_max ), "q_scale_max mismatch" assert torch.allclose(w.q_groups, expected_weight.q_groups), "q_groups mismatch" def test_get_weights_row_gptq(gptq_weights_loader): weights = MockWeights( [ "test_get_weights_row_gptq", ], device="cpu", dtype=torch.float32, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=gptq_weights_loader, ) prefix = "weight" w = weights.get_weights_row( prefix=prefix, ) expected_weight = GPTQWeight( qweight=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=torch.int32), qzeros=torch.tensor([[0, 1], [1, 0]], dtype=torch.int32), scales=torch.tensor([[100.0, 100.0], [100.0, 100.0]], dtype=torch.float16), g_idx=torch.tensor([0, 1, 0, 1], dtype=torch.int32), bits=8.0, groupsize=2.0, use_awq_kernel=False, use_exllama=False, ) assert torch.allclose(w.qweight, expected_weight.qweight), "qweight mismatch" assert torch.allclose(w.qzeros, expected_weight.qzeros), "qzeros mismatch" assert torch.allclose(w.scales, expected_weight.scales), "scales mismatch" assert torch.allclose(w.g_idx, expected_weight.g_idx), "g_idx mismatch" assert w.bits == expected_weight.bits, "bits mismatch" assert w.groupsize == expected_weight.groupsize, "groupsize mismatch" assert w.use_awq_kernel == expected_weight.use_awq_kernel, "use_awq_kernel mismatch" assert w.use_exllama == expected_weight.use_exllama, "use_exllama mismatch" def test_get_weights_row_marlin(marlin_weights_loader): weights = MockWeights( [ "test_get_weights_row_marlin", ], device="cpu", dtype=torch.float16, process_group=dummy_process_group, dummy_fs=dummy_file_system, weights_loader=marlin_weights_loader, ) prefix = "weight" w = weights.get_weights_row( prefix=prefix, ) expected_weight = MarlinWeight( B=torch.tensor([[1, 2], [3, 4]], dtype=torch.int32), s=torch.tensor([[0.5000], [0.2500]], dtype=torch.float16), ) assert torch.allclose(w.B, expected_weight.B), "B mismatch" assert torch.allclose(w.s, expected_weight.s), "s mismatch"
text-generation-inference/server/tests/utils/test_weights.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_weights.py", "repo_id": "text-generation-inference", "token_count": 17962 }
278
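The tests above call get_weights_col_packed with block_sizes given as an int or as a list. The snippet below is a rough sketch, not the actual Weights implementation, of what packed-column sharding typically means for a fused tensor such as QKV: each logical block along dim 0 is sharded independently and the per-rank slices are concatenated, which is why a single-rank setup — like the mocked process_group in these tests — always gets the full tensor back regardless of block_sizes. The function name and exact semantics are assumptions for illustration.

import torch
from typing import List, Union

def shard_packed_dim0(weight: torch.Tensor, block_sizes: Union[int, List[int]],
                      world_size: int, rank: int) -> torch.Tensor:
    # Hypothetical sketch: dim 0 packs several logical blocks (e.g. fused Q/K/V).
    # Each block is sharded separately so every rank keeps a piece of every block.
    total = weight.shape[0]
    if isinstance(block_sizes, int):
        sizes = [total // block_sizes] * block_sizes
    else:
        unit = total // sum(block_sizes)
        sizes = [unit * b for b in block_sizes]
    out, start = [], 0
    for size in sizes:
        shard = size // world_size
        out.append(weight[start + rank * shard : start + (rank + 1) * shard])
        start += size
    return torch.cat(out, dim=0)

# With world_size == 1 the packed tensor comes back unchanged, matching the
# expectations in the test_get_weights_col_packed* tests for 1, 2 and [1, 1].
w = torch.arange(8.0).reshape(4, 2)
assert torch.equal(shard_packed_dim0(w, 2, world_size=1, rank=0), w)
assert torch.equal(shard_packed_dim0(w, [1, 1], world_size=1, rank=0), w)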
from typing import Tuple from dataclasses import dataclass, field from loguru import logger import torch from text_generation_server.layers.fp8 import fp8_quantize from text_generation_server.models.globals import ATTENTION, BLOCK_SIZE from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.kernels import load_kernel from text_generation_server.utils.log import log_once from text_generation_server.utils.weights import Weights if SYSTEM == "cuda": try: paged_attention = load_kernel( module="paged_attention", repo_id="kernels-community/paged-attention" ) except Exception as e: raise ImportError( f"Could not import attention kernels. Make sure your installation is correct. Complete error: {e}" ) else: paged_attention = None @dataclass class KVScales: """ Key-value scales for FP8 KV cache. This data class stores key and value scales both as a GPU tensor and as a GPU float. This inconvenience is necessary because some functions (e.g. scaling kernels) take scales as a GPU tensor, whereas others (e.g. flashinfer) take scales as a CPU scalar. """ key_scale: torch.Tensor value_scale: torch.Tensor key_scale_cpu: float = field(init=False) value_scale_cpu: float = field(init=False) def __post_init__(self): if self.key_scale.numel() != 1 or self.value_scale.numel() != 1: raise ValueError("Key and value scales must be scalar tensors.") self.key_scale_cpu = self.key_scale.item() self.value_scale_cpu = self.value_scale.item() class KVCache: """ Key-value cache for attention layers. """ kv_cache: Tuple[torch.Tensor, torch.Tensor] def __init__( self, *, num_blocks: int, num_heads: int, head_size: int, dtype: torch.dtype, device: torch.device, ): """Construct the key-value cache for a layer.""" if dtype in {torch.float8_e5m2, torch.float8_e4m3fn}: if not ( (ATTENTION == "flashinfer" and SYSTEM == "cuda") or (ATTENTION == "paged" and SYSTEM in ("cuda", "rocm", "ipex")) or (ATTENTION == "flashdecoding-ipex") ): raise ValueError( "FP8 KV cache is currently only supported for flashinfer on CUDA and paged attention on CUDA, ROCm and INTEL IPEX and flashdecoding in Intel IPEX " ) if SYSTEM == "rocm" and dtype == torch.float8_e5m2: raise ValueError( "float8_e5m2 FP8 KV cache is not supported on AMD ROCm" ) if device.type == "cpu" and dtype == torch.float8_e4m3fn: raise ValueError( "float8_e4m3fn FP8 KV cache is not supported on Intel IPEX CPU" ) element_size = torch.tensor([], dtype=dtype).element_size() if SYSTEM == "ipex" and device.type == "xpu": x = 1 else: x = BLOCK_SIZE // element_size if ATTENTION in {"flashdecoding", "flashinfer"} or ( ATTENTION == "flashdecoding-ipex" and device.type == "xpu" ): self.kv_cache = ( torch.empty( (num_blocks, BLOCK_SIZE, num_heads, head_size), dtype=dtype, device=device, ), torch.empty( (num_blocks, BLOCK_SIZE, num_heads, head_size), dtype=dtype, device=device, ), ) elif SYSTEM == "ipex" and device == torch.device("cpu"): # ipex cpu flashdecoding kernel and paged attention kernel share same layout self.kv_cache = ( torch.empty( (num_blocks, num_heads, BLOCK_SIZE, head_size), dtype=dtype, device=device, ), torch.empty( (num_blocks, num_heads, BLOCK_SIZE, head_size), dtype=dtype, device=device, ), ) else: self.kv_cache = ( torch.zeros( (num_blocks, num_heads, head_size // x, BLOCK_SIZE, x), dtype=dtype, device=device, ), torch.zeros( (num_blocks, num_heads, head_size, BLOCK_SIZE), dtype=dtype, device=device, ), ) def can_scale(self, kv_scales: KVScales) -> bool: """Check if the cache can be scaled by the given scales.""" if 
kv_scales.key_scale_cpu == 1.0 and kv_scales.value_scale_cpu == 1.0: return False elif self.dtype == torch.float8_e4m3fn and ( (ATTENTION in ("paged", "flashinfer") and SYSTEM == "cuda") or (ATTENTION == "paged" and SYSTEM in ["rocm", "ipex"]) or (ATTENTION == "flashdecoding-ipex") ): log_once(logger.info, "Using FP8 KV cache scales") return True else: # We have scales, but not the correct FP8 cache type, so warn once. log_once( logger.info, "Ignoring FP8 KV cache scales, supported only for float8_e4m3fn KV cache with flashinfer on CUDA and paged attention on ROCm/IPEX and flashdecoding on IPEX", ) return False @property def dtype(self): """Get the data type of the cache.""" return self.kv_cache[0].dtype @property def key(self): """Get the key cache.""" return self.kv_cache[0] @property def value(self): """Get the value cache.""" return self.kv_cache[1] def store( self, *, key: torch.Tensor, value: torch.Tensor, slots: torch.Tensor, kv_scales: KVScales, ): """Store the key and value at the given slots.""" key_cache = self.kv_cache[0] value_cache = self.kv_cache[1] if self.can_scale(kv_scales) and SYSTEM == "cuda": if kv_scales.key_scale_cpu != 1.0: key = fp8_quantize( key.float(), scale=kv_scales.key_scale, qdtype=self.dtype, scalar=True, )[0] if kv_scales.value_scale_cpu != 1.0: value = fp8_quantize( value.float(), scale=kv_scales.value_scale, qdtype=self.dtype, scalar=True, )[0] if ATTENTION in {"flashdecoding", "flashinfer"}: key = key.to(key_cache.dtype) value = value.to(value_cache.dtype) if key_cache.dtype in {torch.float8_e4m3fn, torch.float8_e5m2}: # Torch index_put does not support float8_{e5m2,e4m3fn} yet, so # put as raw data instead. key_cache = key_cache.view(torch.uint8) value_cache = value_cache.view(torch.uint8) key = key.view(torch.uint8) value = value.view(torch.uint8) shape = key_cache.shape key_cache.view(-1, shape[-2], shape[-1])[slots] = key value_cache.view(-1, shape[-2], shape[-1])[slots] = value elif ATTENTION == "flashdecoding-ipex" and key.device.type == "xpu": import intel_extension_for_pytorch as ipex kv_cache_dtype = "auto" if key_cache.dtype == torch.float8_e5m2: kv_cache_dtype = "fp8_e5m2" if key_cache.dtype == torch.float8_e4m3fn: kv_cache_dtype = "fp8_e4m3" ipex.llm.modules.PagedAttention.reshape_and_cache_flash( key, value, key_cache, value_cache, slots, kv_cache_dtype=kv_cache_dtype, k_scale=kv_scales.key_scale_cpu, v_scale=kv_scales.value_scale_cpu, ) else: paged_reshape_and_cache( key, value, key_cache, value_cache, slots, kv_scales.key_scale_cpu, kv_scales.value_scale_cpu, ) def paged_reshape_and_cache( key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor, k_scale: float = 1.0, v_scale: float = 1.0, ): if SYSTEM == "cuda": kv_cache_dtype = "auto" if key_cache.dtype == torch.float8_e4m3fn: kv_cache_dtype = "fp8" paged_attention.reshape_and_cache( key, value, key_cache, value_cache, slots, kv_cache_dtype, torch.tensor(k_scale), torch.tensor(v_scale), ) elif SYSTEM == "rocm": try: import vllm._custom_ops as ops except Exception as e: raise ImportError( f"Could not import vllm paged attention. Make sure your installation is correct. 
Complete error: {e}" ) kv_cache_dtype = "auto" if key_cache.dtype == torch.float8_e4m3fn: key_cache = key_cache.view(torch.uint8) value_cache = value_cache.view(torch.uint8) kv_cache_dtype = "fp8" ops.reshape_and_cache( key, value, key_cache, value_cache, slots, kv_cache_dtype, k_scale, v_scale ) elif SYSTEM == "ipex": import intel_extension_for_pytorch as ipex kv_cache_dtype = "auto" if key_cache.dtype == torch.float8_e5m2: kv_cache_dtype = "fp8_e5m2" if key_cache.dtype == torch.float8_e4m3fn: kv_cache_dtype = "fp8_e4m3" ipex.llm.modules.PagedAttention.reshape_and_cache( key, value, key_cache, value_cache, slots, kv_cache_dtype=kv_cache_dtype, k_scale=k_scale, v_scale=v_scale, ) else: raise NotImplementedError( f"Cannot reshape and cache for paged attention, system '{SYSTEM}' not supported" ) def get_kv_scales(weights: Weights, prefix: str) -> KVScales: """Load KV cache scales.""" key_scale = torch.tensor(1.0, dtype=torch.float32, device=weights.device) value_scale = key_scale if weights.has_tensor(f"{prefix}.k_scale") and weights.has_tensor( f"{prefix}.v_scale" ): key_scale = weights.get_tensor(f"{prefix}.k_scale", to_dtype=False).float() value_scale = weights.get_tensor(f"{prefix}.v_scale", to_dtype=False).float() elif weights.has_tensor(f"{prefix}.kv_scale"): # Fall back to older more coarse-grained scale when available. key_scale = weights.get_tensor(f"{prefix}.kv_scale").float() value_scale = key_scale return KVScales(key_scale=key_scale, value_scale=value_scale)
text-generation-inference/server/text_generation_server/layers/attention/kv_cache.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/attention/kv_cache.py", "repo_id": "text-generation-inference", "token_count": 5908 }
279
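To make the "flashdecoding"/"flashinfer" branch of KVCache.store above concrete, here is a minimal reference of how a flat slot id addresses the paged cache once the first two dimensions are collapsed. It mirrors the key_cache.view(-1, shape[-2], shape[-1])[slots] = key lines in the file and is illustrative only; it is not a replacement for the fused reshape_and_cache kernels used on the other paths, and the sizes are made up.

import torch

BLOCK_SIZE = 16  # illustrative; the real value comes from models.globals

def reference_store(key, value, key_cache, value_cache, slots):
    # Caches are [num_blocks, BLOCK_SIZE, num_heads, head_size]; collapsing the
    # first two dims lets a flat slot (block_id * BLOCK_SIZE + offset) index
    # the token dimension directly.
    k_flat = key_cache.view(-1, *key_cache.shape[-2:])
    v_flat = value_cache.view(-1, *value_cache.shape[-2:])
    k_flat[slots] = key.to(key_cache.dtype)
    v_flat[slots] = value.to(value_cache.dtype)

num_blocks, num_heads, head_size = 4, 2, 8
k_cache = torch.zeros(num_blocks, BLOCK_SIZE, num_heads, head_size)
v_cache = torch.zeros_like(k_cache)
key = torch.randn(3, num_heads, head_size)
value = torch.randn(3, num_heads, head_size)
slots = torch.tensor([0, 1, 35])  # third token lands in block 2, offset 3
reference_store(key, value, k_cache, v_cache, slots)
assert torch.equal(k_cache[2, 3], key[2])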
from dataclasses import dataclass import os from typing import Optional, Tuple, Type, Union, List import torch from loguru import logger from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.kernels import load_kernel from text_generation_server.utils.weights import ( Weight, WeightsLoader, UnquantizedWeight, Weights, ) from text_generation_server.utils.log import log_once if SYSTEM == "cuda": quantization = load_kernel( module="quantization", repo_id="kernels-community/quantization" ) else: quantization = None try: from moe_kernels.fp8_utils import w8a8_block_fp8_matmul, per_token_group_quant_fp8 except ImportError: w8a8_block_fp8_matmul = None per_token_group_quant_fp8 = None quant_dtype: torch.dtype = ( torch.float8_e4m3fnuz if SYSTEM == "rocm" else torch.float8_e4m3fn ) if SYSTEM == "cuda" and quantization is not None: major, minor = torch.cuda.get_device_capability() CUTLASS_FP8_AVAILABLE = quantization.cutlass_scaled_mm_supports_fp8( major * 10 + minor ) else: CUTLASS_FP8_AVAILABLE = False def get_fp8_linear(force_w8a16: bool = False) -> Type[torch.nn.Module]: """ Return an FP8 linear `Module` that is compatible with the current system. """ if SYSTEM == "cuda": major, _ = torch.cuda.get_device_capability() # Marlin is W8A16, use it when: # # - On capability 8.x where x < 8: W8A8 FP8 GEMM is not supported. # - On capability 8.9: W8A8 FP8 GEMM is supported, but Marlin-FP8 is faster. # - On capability 9.x when force_w8a16: cutlass kernels do not support W8A16. if (major == 8 or (major == 9 and force_w8a16)) and os.getenv( "USE_CUTLASS_W8A8", "0" ) != "1": # NOTE: Capability 8.9 is supported by cutlass kernels, but FP8-Marlin # gives better decoding throughput on L4 and L40. from text_generation_server.layers.marlin import GPTQMarlinFP8Linear if major == 8 and minor == 9: log_once( logger.info, "GPU supports FP8, but using Marlin FP8 kernel for better performance", ) else: log_once( logger.info, "GPU does not support FP8, using Marlin FP8 kernel" ) return GPTQMarlinFP8Linear # On other systems let Torch decide if the hardware supports FP8. return Fp8Linear def normalize_e4m3fn_to_native_float8( weight: torch.Tensor, weight_scale: torch.Tensor, input_scale: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: if weight.dtype == torch.float8_e4m3fn and SYSTEM == "rocm": # The bits pattern 10000000(-128) represents zero in e4m3fn # but NaN in e4m3fnuz. So here we set it to 0. # https://onnx.ai/onnx/technical/float8.html weight_as_int8 = weight.view(torch.int8) ROCM_FP8_NAN_AS_INT = -128 weight_as_int8[weight_as_int8 == ROCM_FP8_NAN_AS_INT] = 0 weight = weight_as_int8.view(torch.float8_e4m3fnuz) # For the same bits representation, e4m3fnuz value is half of # the e4m3fn value, so we should double the scaling factor to # get the same dequantized value. # https://onnx.ai/onnx/technical/float8.html weight_scale = weight_scale * 2.0 if input_scale is not None: input_scale = input_scale * 2.0 return weight, weight_scale, input_scale def per_tensor_dequantize( tensor: torch.Tensor, inv_scale: Union[float, torch.Tensor], dtype: torch.dtype = torch.float16, ) -> torch.Tensor: fake_qweight = tensor.to(dtype) dq_weight = fake_qweight * inv_scale return dq_weight def requantize_with_max_scale( weight: torch.Tensor, weight_scale: torch.Tensor, logical_widths: int, dtype: torch.dtype, ) -> Tuple[torch.Tensor, torch.Tensor]: # Max scale to be used for requanitzation. 
max_w_scale = weight_scale.max().float() start = 0 for idx, logical_width in enumerate(logical_widths): end = start + logical_width weight_dq = per_tensor_dequantize( weight[start:end, :], weight_scale[idx], dtype ) weight[start:end, :], max_w_scale_normalized = fp8_quantize( weight_dq, max_w_scale ) start = end return weight, max_w_scale_normalized def fp8_quantize( weight: torch.Tensor, scale: Optional[torch.Tensor] = None, scale_upper_bound: Optional[torch.Tensor] = None, qdtype: torch.dtype = torch.float8_e4m3fn, scalar: bool = False, ): """ This function returns a reciprocal of the scale, so that a tensor can be unscaled by multiplying it with the returned scale. If a scale is given through the `scale` argument, it must also be a reciprocal (so that scales from an FP8 checkpoint can be used without modification). """ if quantization is not None: shape = weight.shape qweight, scale = quantization.scaled_fp8_quant( weight.reshape(-1, shape[-1]), scale=scale, scale_ub=scale_upper_bound, # TODO: don't do this when we have to use the Torch kernel. use_per_token_if_dynamic=not scalar, ) return qweight.reshape(shape), scale finfo = torch.finfo(qdtype) if scale is None: # Calculate the scale as dtype max divided by absmax scale = finfo.max / weight.abs().max().clamp(min=1e-12, max=scale_upper_bound) # scale and clamp the tensor to bring it to # the representative range of float8 data type # (as default cast is unsaturated) qweight = (weight * scale).clamp(min=finfo.min, max=finfo.max) scale = scale.float().reciprocal() else: if SYSTEM == "rocm": scale = scale / 2.0 # Use reciprocal to avoid more expensive division. qweight = (weight * scale.reciprocal()).clamp(min=finfo.min, max=finfo.max) # Return both float8 data and the inverse scale (as float), # as both required as inputs to torch._scaled_mm qweight = qweight.to(qdtype) if SYSTEM == "rocm": qweight, scale, _ = normalize_e4m3fn_to_native_float8(qweight, scale) return qweight, scale class HybridFP8UnquantLoader(WeightsLoader): """Weight loader that loads FP8 and unquantized Torch tensors.""" def __init__( self, activation_scale_ub: Optional[float], to_fp8: bool, weight_block_size: Optional[List[int]] = None, ): self.activation_scale_ub = activation_scale_ub self.to_fp8 = to_fp8 self.weight_block_size = weight_block_size def get_weights(self, weights: "Weights", prefix: str): w = weights.get_tensor(f"{prefix}.weight") if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: scale = weights.get_tensor(f"{prefix}.weight_scale_inv") return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) # FP8 branch scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) if SYSTEM == "cuda": scale.reshape(-1).expand(w.shape[0]) input_scale = None if weights.has_tensor(f"{prefix}.input_scale"): input_scale = ( weights.get_tensor(f"{prefix}.input_scale", to_dtype=False) .reshape(-1) .max() ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_weights_col_packed( self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]], ): w = weights.get_packed_sharded( f"{prefix}.weight", dim=0, block_sizes=block_sizes ) if w.dtype == torch.float8_e4m3fn: # FP8 branch scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) if 
scale.numel() > 1: scale = weights.get_packed_sharded( f"{prefix}.weight_scale", dim=0, block_sizes=block_sizes, to_dtype=False, ) if SYSTEM == "cuda": scale = scale.reshape(-1).expand(w.shape[0]) input_scale = None if weights.has_tensor(f"{prefix}.input_scale"): input_scale = weights.get_tensor( f"{prefix}.input_scale", to_dtype=False ) if input_scale.numel() > 1: input_scale = weights.get_packed_sharded( f"{prefix}.input_scale", dim=0, block_sizes=block_sizes, to_dtype=False, ) input_scale = input_scale.reshape(-1).max() return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_multi_weights_col(self, weights: "Weights", prefixes: List[str], dim: int): # FIXME: Force to_device to false as fp8 weights do not support torch.cat on device yet w = [ weights.get_sharded(f"{p}.weight", dim=0, to_device=False) for p in prefixes ] shapes = [x.shape for x in w] # Concat then send to the device w = torch.cat(w, dim=dim).to(weights.device) # FP8 branch if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: scale = [ weights.get_sharded(f"{p}.weight_scale_inv", dim=0, to_device=False) for p in prefixes ] scale = torch.cat(scale, dim=dim) scale = scale.to(weights.device) return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) scale = [ _load_scalar_or_matrix_scale(weights, f"{p}.weight_scale", shape) for p, shape in zip(prefixes, shapes) ] scale = torch.cat(scale, dim=0).reshape(-1) input_scale = [ _load_scalar_or_matrix_scale(weights, f"{p}.input_scale", shape) for p, shape in zip(prefixes, shapes) if weights.has_tensor(f"{p}.input_scale") ] assert len(input_scale) == 0 or len(input_scale) == len(prefixes) input_scale = ( torch.cat(input_scale, dim=0).reshape(-1).max() if len(input_scale) != 0 else None ) if SYSTEM == "rocm": w, scale, input_scale = normalize_e4m3fn_to_native_float8( w, scale, input_scale ) if scale.numel() == len(prefixes): logical_widths = [x[0] for x in shapes] w, scale = requantize_with_max_scale( w, scale.to(weights.device), logical_widths, weights.dtype ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_weights_row(self, weights: "Weights", prefix: str): w = weights.get_sharded(f"{prefix}.weight", dim=1) # FP8 branch if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: # XXX: Yes the weights is named scale_inv, but corresponds to scale it seems. 
scale = weights.get_sharded(f"{prefix}.weight_scale_inv", dim=1) return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) if SYSTEM == "cuda": scale = scale.reshape(-1).expand(w.shape[0]) input_scale = None if weights.has_tensor(f"{prefix}.input_scale"): input_scale = ( weights.get_tensor(f"{prefix}.input_scale", to_dtype=False) .reshape(-1) .max() ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) @dataclass class Fp8Weight(Weight): weight: torch.Tensor dtype: torch.dtype weight_scale: Optional[torch.Tensor] = None input_scale: Optional[torch.Tensor] = None activation_scale_ub: Optional[float] = None force_w8a16: bool = False weight_block_size: Optional[List[int]] = None def get_linear(self, bias: torch.Tensor): if self.weight_scale is None: return get_fp8_linear(force_w8a16=self.force_w8a16).from_unquant( self.weight, bias, self.dtype ) # This is not checked by the fbgemm kernels, but they require contiguous # memory. Can be non-contiguous when we e.g. expand from scalars. self.weight_scale = self.weight_scale.contiguous() return get_fp8_linear(force_w8a16=self.force_w8a16).from_fp8( weight=self.weight, scale=self.weight_scale, dtype=self.dtype, bias=bias, input_scale=self.input_scale, scale_upper_bound=self.activation_scale_ub, weight_block_size=self.weight_block_size, ) class Fp8Linear(torch.nn.Module): _device_identity_cache = {} def __init__( self, qweight: torch.Tensor, scale: torch.Tensor, dtype: torch.dtype, bias: Optional[torch.Tensor] = None, input_scale: Optional[torch.Tensor] = None, scale_upper_bound: Optional[float] = None, weight_block_size: Optional[List[int]] = None, ) -> None: super().__init__() if CUTLASS_FP8_AVAILABLE: log_once(logger.info, "Using cutlass w8a8 kernels") if SYSTEM == "rocm" and qweight.dtype == torch.float8_e4m3fn: qweight, scale, input_scale = normalize_e4m3fn_to_native_float8( weight=qweight, weight_scale=scale, input_scale=input_scale ) self.dtype = dtype self.qweight = qweight self.scale = scale.float() self.input_scale = input_scale.float() if input_scale is not None else None self.weight_block_size = weight_block_size if CUTLASS_FP8_AVAILABLE and scale_upper_bound is not None: self.scale_upper_bound = torch.tensor( scale_upper_bound, dtype=torch.float32, device=qweight.device ) else: self.scale_upper_bound = scale_upper_bound self.bias = bias if bias is not None else None @classmethod def from_unquant(cls, weight, bias, dtype): qweight, scale = fp8_quantize(weight, scalar=not CUTLASS_FP8_AVAILABLE) return cls( qweight=qweight, scale=scale, dtype=dtype, bias=bias, input_scale=None, scale_upper_bound=None, ) @classmethod def from_fp8( cls, weight: torch.Tensor, scale: torch.Tensor, dtype: torch.dtype, bias: Optional[torch.Tensor] = None, **kwargs, ) -> "Fp8Linear": input_scale = kwargs.get("input_scale", None) scale_upper_bound = kwargs.get("scale_upper_bound", None) weight_block_size = kwargs.get("weight_block_size", None) return cls( qweight=weight, scale=scale, input_scale=input_scale, scale_upper_bound=scale_upper_bound, bias=bias, dtype=dtype, weight_block_size=weight_block_size, ) @classmethod def get_shared_device_identity(cls, device): # Input scaling factors are no longer optional in 
_scaled_mm starting # from pytorch 2.5. Allocating a dummy tensor to pass as input_scale if device not in cls._device_identity_cache: cls._device_identity_cache[device] = torch.ones(1, device=device) return cls._device_identity_cache[device] def forward(self, input: torch.Tensor) -> torch.Tensor: if self.weight_block_size is not None: # https://arxiv.org/pdf/2412.19437 # At a more granular level. As illustrated in Figure 7 (a), (1) for activations, we group and # scale elements on a 1x128 tile basis (i.e., per token per 128 channels); and (2) for weights, we # group and scale elements on a 128x128 block basis (i.e., per 128 input channels per 128 output # channels). qinput, scale = per_token_group_quant_fp8(input, self.weight_block_size[1]) output = w8a8_block_fp8_matmul( qinput, self.qweight, scale, self.scale, self.weight_block_size, output_dtype=input.dtype, ) if self.bias is not None: output = output + self.bias return output.to(dtype=input.dtype) if CUTLASS_FP8_AVAILABLE: # cutlass FP8 supports per-token scales, so get non-scalar scales. qinput, scale = fp8_quantize( input, scale_upper_bound=self.scale_upper_bound, scalar=False ) return quantization.cutlass_scaled_mm( qinput, self.qweight.t(), scale, self.scale, input.dtype, self.bias ) qinput, scale = fp8_quantize( input, self.input_scale, scale_upper_bound=self.scale_upper_bound, scalar=True, ) per_tensor_weights = self.scale.numel() == 1 per_tensor_activations = scale.numel() == 1 if SYSTEM != "rocm" or (per_tensor_weights and per_tensor_activations): output = torch._scaled_mm( qinput, self.qweight.t(), out_dtype=self.dtype, scale_a=scale, scale_b=self.scale, bias=self.bias, ) if isinstance(output, tuple) and len(output) == 2: output = output[0] else: device_identity = None if SYSTEM == "rocm": device_identity = self.get_shared_device_identity(self.qweight.device) output = torch._scaled_mm( qinput, self.qweight.t(), scale_a=device_identity, scale_b=device_identity, out_dtype=torch.float32, ) if isinstance(output, tuple) and len(output) == 2: output = output[0] output = output * scale * self.scale.t() if self.bias is not None: output = output + self.bias output = output.to(dtype=self.dtype) return output def _load_scalar_or_matrix_scale(weights: Weights, prefix: str, shape: torch.Size): scale = weights.get_tensor(prefix, to_dtype=False) if scale.numel() > 1: scale = weights.get_sharded(prefix, dim=0, to_dtype=False) elif SYSTEM == "rocm": return scale.reshape(-1) return scale.reshape(-1).expand(shape[0])
text-generation-inference/server/text_generation_server/layers/fp8.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/fp8.py", "repo_id": "text-generation-inference", "token_count": 10546 }
280
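The docstring of fp8_quantize above stresses that the returned scale is a reciprocal, so dequantization is a plain multiply. The snippet below is a small round-trip demonstration of that convention using only the math from the non-kernel branch (dtype max over absmax, clamp, cast). It assumes a PyTorch build with float8_e4m3fn support and is not the library's code path.

import torch

def fp8_roundtrip_demo():
    finfo = torch.finfo(torch.float8_e4m3fn)
    w = torch.randn(4, 8, dtype=torch.float32)

    # Quantize: scale = dtype_max / absmax, clamp to the representable range.
    scale = finfo.max / w.abs().max().clamp(min=1e-12)
    qweight = (w * scale).clamp(min=finfo.min, max=finfo.max).to(torch.float8_e4m3fn)

    # The returned scale is the reciprocal, so dequantization is a multiply,
    # which is the convention torch._scaled_mm and the FP8 KV cache rely on.
    inv_scale = scale.reciprocal()
    w_dq = qweight.to(torch.float32) * inv_scale
    return (w - w_dq).abs().max()  # small quantization error relative to |w|

print(fp8_roundtrip_demo())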
import functools
from typing import List, Tuple

import numpy
import torch

from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.kernels import load_kernel

if SYSTEM == "cuda":
    quantization = load_kernel(
        module="quantization", repo_id="kernels-community/quantization"
    )
else:
    quantization = None

try:
    major, _minor = torch.cuda.get_device_capability()
    has_sm_8_0 = major >= 8
except Exception:
    has_sm_8_0 = False


def _check_marlin_kernels():
    if not (SYSTEM == "cuda" and has_sm_8_0):
        raise NotImplementedError(
            "Using quantized Marlin models requires a GPU with CUDA capability 8.0 or later."
        )

    if quantization is None:
        raise NotImplementedError(
            "marlin is not installed, install it with: pip install server/marlin"
        )


# https://github.com/IST-DASLab/marlin/blob/2f6d7c10e124b3c5fa29ff8d77d568bd7af3274c/marlin/__init__.py#L40C1-L68C54
@functools.cache
def get_perms() -> Tuple[List[int], List[int]]:
    scale_perm = []
    for i in range(8):
        scale_perm.extend([i + 8 * j for j in range(8)])
    scale_perm_single = []
    for i in range(4):
        scale_perm_single.extend([2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]])
    return scale_perm, scale_perm_single


def permute_scales(scales: torch.Tensor):
    scale_perm, scale_perm_single = get_perms()
    out_features = scales.shape[1]
    if scales.shape[0] == 1:
        scales = scales.reshape((-1, len(scale_perm_single)))[:, scale_perm_single]
    else:
        scales = scales.reshape((-1, len(scale_perm)))[:, scale_perm]
    return scales.reshape((-1, out_features)).contiguous()


# Functions below are from vLLM


def get_pack_factor(bits: int) -> int:
    # Number of quantized values that fit in one 32-bit word.
    if 32 % bits != 0:
        raise ValueError(f"Cannot pack {bits} bit values into uint32")
    return 32 // bits


def pack_cols(
    q_w: torch.Tensor,
    num_bits: int,
    size_k: int,
    size_n: int,
):
    assert q_w.shape == (size_k, size_n)

    pack_factor = get_pack_factor(num_bits)
    assert size_n % pack_factor == 0

    orig_device = q_w.device

    q_w = q_w.cpu().numpy().astype(numpy.uint32)

    q_res = numpy.zeros((size_k, size_n // pack_factor), dtype=numpy.uint32)

    # Each output int32 packs `pack_factor` consecutive input columns,
    # `num_bits` bits each.
    for i in range(pack_factor):
        q_res |= q_w[:, i::pack_factor] << num_bits * i

    q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device)
    q_res = q_res.contiguous()

    return q_res


def unpack_cols(
    packed_q_w: torch.Tensor,
    num_bits: int,
    size_k: int,
    size_n: int,
):
    pack_factor = get_pack_factor(num_bits)
    assert size_n % pack_factor == 0
    assert packed_q_w.shape == (
        size_k,
        size_n // pack_factor,
    ), "packed_q_w.shape = {} size_k = {}, size_n = {} pack_factor = {}".format(
        packed_q_w.shape, size_k, size_n, pack_factor
    )

    orig_device = packed_q_w.device

    packed_q_w_cpu = packed_q_w.cpu().numpy().astype(numpy.uint32)
    q_res = numpy.zeros((size_k, size_n), dtype=numpy.uint32)
    mask = (1 << num_bits) - 1

    # Peel off `num_bits` at a time, restoring the original column layout.
    for i in range(pack_factor):
        vals = packed_q_w_cpu & mask
        packed_q_w_cpu >>= num_bits
        q_res[:, i::pack_factor] = vals

    q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device)
    q_res = q_res.contiguous()

    return q_res


def marlin_zero_points(
    zp: torch.Tensor, size_k: int, size_n: int, num_bits: int
) -> torch.Tensor:
    scale_perm, _ = get_perms()
    # Permute zero-points in a similar way to scales, but do not use the
    # "single" permutation, since zero-points are applied on every MMA
    zp = zp.reshape((-1, len(scale_perm)))[:, scale_perm]

    # Interleave column dim (for the dequantize code) and pack it to int32
    if num_bits == 4:
        interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7])
    elif num_bits == 8:
        interleave = numpy.array([0, 2, 1, 3])
    else:
        raise Exception("num_bits must be 4 or 8, got {}".format(num_bits))

    zp = zp.reshape((-1, len(interleave)))[:, interleave].ravel()
    zp = zp.reshape((-1, size_n)).contiguous()
    zp = pack_cols(zp, num_bits, size_k, size_n)

    return zp
text-generation-inference/server/text_generation_server/layers/marlin/util.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/marlin/util.py", "repo_id": "text-generation-inference", "token_count": 1826 }
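As a quick sanity check on the bit-packing helpers in this file, the following sketch packs made-up 4-bit values column-wise into int32 words and unpacks them again. It assumes the text-generation-server package is installed so the module above is importable; the sizes and values are illustrative only.

import torch

from text_generation_server.layers.marlin.util import pack_cols, unpack_cols

num_bits = 4                     # 32 // 4 = 8 quantized values per 32-bit word
size_k, size_n = 16, 64          # size_n must be divisible by the pack factor

# Made-up 4-bit quantized weights in [0, 15]
q_w = torch.randint(0, 2**num_bits, (size_k, size_n), dtype=torch.int32)

packed = pack_cols(q_w, num_bits, size_k, size_n)
assert packed.shape == (size_k, size_n // (32 // num_bits))   # (16, 8)

unpacked = unpack_cols(packed, num_bits, size_k, size_n)
assert torch.equal(unpacked, q_w)                              # lossless round trip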
281
# coding=utf-8 # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from transformers.utils import ( ModelOutput, logging, ) from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelRowLinear, TensorParallelEmbedding, ) logger = logging.get_logger(__name__) @dataclass class IdeficsVisionModelOutput(ModelOutput): """ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. Args: image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Idefics class IdeficsVisionEmbeddings(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter( weights.get_tensor(f"{prefix}.class_embedding") ) self.patch_embedding = nn.Conv2d.load_no_bias( prefix=f"{prefix}.patch_embedding", weights=weights, in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = TensorParallelEmbedding( prefix="model.vision_model.embeddings.position_embedding", weights=weights ) self.position_ids = ( torch.arange(self.num_positions).expand((1, -1)).to(device=weights.device) ) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding( pixel_values.to(dtype=target_dtype) ) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision class IdeficsVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.k_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k_proj", weights=weights, bias=True ) self.v_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v_proj", weights=weights, bias=True ) self.q_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q_proj", weights=weights, bias=True ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=True ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return ( tensor.view(bsz, seq_len, self.num_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view( bsz, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights_reshaped.view( bsz * self.num_heads, tgt_len, src_len ) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision class IdeficsVisionMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.fc1", weights=weights, bias=True ) self.fc2 = TensorParallelRowLinear.load( config, prefix=f"{prefix}.fc2", weights=weights, bias=True ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision class IdeficsVisionEncoderLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IdeficsVisionAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.layer_norm1 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps ) self.mlp = IdeficsVisionMLP( prefix=f"{prefix}.mlp", config=config, weights=weights ) self.layer_norm2 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps ) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision class IdeficsVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`IdeficsVisionEncoderLayer`]. Args: config: IdeficsVisionConfig """ def __init__(self, prefix, config, weights): super().__init__() self.config = config self.layers = nn.ModuleList( [ IdeficsVisionEncoderLayer( prefix=f"{prefix}.encoder.layers.{layer_id}", config=config, weights=weights, ) for layer_id in range(config.num_hidden_layers) ] ) # self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # if self.gradient_checkpointing and self.training: # def create_custom_forward(module): # def custom_forward(*inputs): # return module(*inputs, output_attentions) # return custom_forward # layer_outputs = torch.utils.checkpoint.checkpoint( # create_custom_forward(encoder_layer), # hidden_states, # attention_mask, # causal_attention_mask, # ) # else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, encoder_states, all_attentions] if v is not None ) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions, ) # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer class IdeficsVisionTransformer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embeddings = IdeficsVisionEmbeddings( prefix=f"{prefix}.embeddings", config=config, weights=weights ) self.pre_layrnorm = nn.LayerNorm.load( prefix=f"{prefix}.pre_layrnorm", weights=weights, eps=config.layer_norm_eps ) self.encoder = IdeficsVisionEncoder( prefix=prefix, config=config, weights=weights ) self.post_layernorm = nn.LayerNorm.load( prefix=f"{prefix}.post_layernorm", weights=weights, eps=config.layer_norm_eps, ) # copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_vision.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_vision.py", "repo_id": "text-generation-inference", "token_count": 9625 }
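The shape bookkeeping in IdeficsVisionEmbeddings above reduces to simple patch arithmetic: the image is cut into non-overlapping patches by a strided convolution, a class token is prepended, and position ids cover num_patches + 1 slots. The sketch below uses plain nn.Conv2d and nn.Parameter instead of the sharded loading code, and the CLIP-ViT-style sizes are assumptions for illustration, not values read from an Idefics checkpoint.

import torch
import torch.nn as nn

image_size, patch_size, hidden_size = 224, 14, 1280
batch_size = 2

patch_embedding = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size, bias=False)
class_embedding = nn.Parameter(torch.zeros(hidden_size))

pixel_values = torch.randn(batch_size, 3, image_size, image_size)
patch_embeds = patch_embedding(pixel_values)               # (2, 1280, 16, 16)
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)     # (2, 256, 1280)

class_embeds = class_embedding.expand(batch_size, 1, -1)   # prepend one class token per image
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

num_patches = (image_size // patch_size) ** 2              # 256
assert embeddings.shape == (batch_size, num_patches + 1, hidden_size)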
282
from io import BytesIO from PIL import Image import torch import time from dataclasses import dataclass from opentelemetry import trace from transformers import ( AutoConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizerBase, ProcessorMixin, ) from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling import torch.distributed from text_generation_server.models.custom_modeling.idefics_modeling import ( IdeficsForVisionText2Text, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.import_utils import SYSTEM tracer = trace.get_tracer(__name__) @dataclass class IdeficsCausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] # Decoder values input_ids: torch.Tensor attention_mask: torch.Tensor position_ids: torch.Tensor pixel_values: Optional[torch.Tensor] image_hidden_states: Optional[torch.Tensor] image_attention_mask: Optional[torch.Tensor] past_key_values: Optional[List[Tuple]] # All tokens all_input_ids: List[torch.Tensor] # Lengths of all generations present in the batch input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] # Generation helpers next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] # Metadata used for padding max_input_length: int padding_right_offset: int # Maximum number of tokens this batch will grow to max_tokens: int # Past metadata keys_head_dim_last: bool = True def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch( id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens, current_tokens=len(self), ) @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "IdeficsCausalLMBatch": raise NotImplementedError @classmethod def from_pb_processor( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, processor: ProcessorMixin, # Hack config, dtype: torch.dtype, device: torch.device, ) -> "IdeficsCausalLMBatch": inputs = [] next_token_choosers = [] stopping_criterias = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} # Parse batch max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for i, r in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.input_chunks.chunks) next_token_choosers.append( NextTokenChooser.from_pb(r.parameters, device, tokenizer) ) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) stopping_criterias.append(stopping_criteria) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max( padding_right_offset, stopping_criteria.max_new_tokens ) # TODO Check impact on idefics prompts = [] for inp in inputs: # Each input is encoded into a list, where each element of this input list is either a string or a URL prompt = [] for chunk in inp: chunk_type = chunk.WhichOneof("chunk") if chunk_type == "text": prompt.append(chunk.text) elif chunk_type == "image": image = Image.open(BytesIO(chunk.image.data)) prompt.append(image) else: raise 
RuntimeError(f"Invalid chunk type {chunk_type}") prompts.append(prompt) # The processor replaces the call to tokenizer, and # a/ takes care of fetching images from the URL # b/ generate the correct input_ids, attention_mask, pixel_values, image_attention_mask to feed to the model tokenized_inputs = processor( prompts, return_tensors="pt", padding=True, truncation=True, max_length=max_truncation, # TODO Check impact on idefics # add_end_of_utterance_token=False, # Already taken care of inside the prompts, so bypassing the processor's handling of this token ).to(device) for _ in pb.requests: input_len = tokenized_inputs["input_ids"].shape[1] prefix_offsets.append( input_len - 5 ) # To decode without potential fallbacks errors read_offsets.append( input_len ) # To decode without potential fallbacks errors input_lengths = tokenized_inputs["attention_mask"].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs["input_ids"] pixel_values = tokenized_inputs.get("pixel_values", None) image_hidden_states = None # Allocate maximum attention_mask attention_mask = input_ids.new_zeros( (pb.size, max_input_length + padding_right_offset) ) # Copy tokenizer attention_mask into fully allocated attention_mask attention_mask[:, :max_input_length] = tokenized_inputs["attention_mask"] # Do the same for image_attention_mask if pixel_values is None: image_attention_mask = None else: image_attention_mask = input_ids.new_zeros( ( pb.size, max_input_length + padding_right_offset, pixel_values.size(1), ) ) image_attention_mask[:, :max_input_length, :] = tokenized_inputs[ "image_attention_mask" ] position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1 position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1) all_input_ids = tokenized_inputs["input_ids"].T.split( 1, dim=1 ) # It's input_ids but splitted into a tuple of tensors where each tensor is (seq_len, 1) size. It is then transformed into a list max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls( batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens, ) @tracer.start_as_current_span("filter") def filter(self, request_ids: List[int]) -> Optional["IdeficsCausalLMBatch"]: # It deletes requests from the batch. 
For instance when client lost connection if len(request_ids) == 0: raise ValueError("Batch must have at least one request") if len(request_ids) == len(self): return self keep_indices = [] # New values after filtering requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 for i, request_id in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) remaining_decode_tokens = ( stopping_criteria.max_new_tokens - stopping_criteria.current_tokens ) total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max( new_padding_right_offset, remaining_decode_tokens ) # Apply indices to input_ids, attention mask, past key values and other items that need to be cached input_ids = self.input_ids[keep_indices] position_ids = self.position_ids[keep_indices] self.attention_mask = self.attention_mask[ keep_indices, -(self.padding_right_offset + max_input_length) : ( self.attention_mask.shape[1] - self.padding_right_offset ) + new_padding_right_offset, ] # Do the same for pixel_values and image_attention_mask pixel_values = self.pixel_values[keep_indices] self.image_attention_mask = self.image_attention_mask[ keep_indices, -(self.padding_right_offset + max_input_length) : ( self.image_attention_mask.shape[1] - self.padding_right_offset ) + new_padding_right_offset, :, ] if self.image_hidden_states is None: image_hidden_states = None else: image_hidden_states = self.image_hidden_states[keep_indices] # Ensure that past_key_values tensors can be updated in-place if type(self.past_key_values[0]) is tuple: self.past_key_values = [list(layer) for layer in self.past_key_values] # Update tensors in-place to allow incremental garbage collection past_kv_length = max_input_length - 1 for layer in self.past_key_values: past_keys, past_values = layer if len(past_keys.shape) == 3: # Force past to be of dim [self_size, num_heads, ...] 
for easy indexing past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:]) past_values = past_values.view(len(self), -1, *past_values.shape[-2:]) if self.keys_head_dim_last: layer[0] = past_keys[keep_indices, :, -past_kv_length:, :] else: layer[0] = past_keys[keep_indices, :, :, -past_kv_length:] del past_keys layer[1] = past_values[keep_indices, :, -past_kv_length:, :] del past_values max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.pixel_values = pixel_values self.image_hidden_states = image_hidden_states self.position_ids = position_ids self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens return self @classmethod @tracer.start_as_current_span("concatenate") def concatenate( cls, batches: List["IdeficsCausalLMBatch"] ) -> "IdeficsCausalLMBatch": # It adds new requests to the batch # Used for padding total_batch_size = 0 max_input_length = 0 max_num_images = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) max_num_images = max(max_num_images, batch.pixel_values.size(1)) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) # Batch attributes requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] max_tokens = 0 # Batch tensors input_ids = None attention_mask = None position_ids = None pixel_values = None image_hidden_states = None image_attention_mask = None past_key_values = [] # Used for slicing correctly inside the tensors # Equivalent to a cumsum on batch sizes start_index = 0 for i, batch in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: # We need to offset the mapping for each batch by the cumulative batch size for k, v in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index # Slicing end index for this batch end_index = start_index + len(batch) # We only concatenate batches that did at least one step if batch.past_key_values is None: raise ValueError("only concatenate prefilled batches") # Create empty tensor # input_ids is always of shape [batch_size, 1] # We do not need to pad it if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) # Copy to correct indices input_ids[start_index:end_index] = batch.input_ids # Create padded tensor if attention_mask is None: attention_mask = batch.attention_mask.new_zeros( (total_batch_size, max_input_length + padding_right_offset), ) curr_batch_max_num_images = batch.pixel_values.size(1) if pixel_values is None: pixel_values = batch.pixel_values.new_zeros( (total_batch_size, max_num_images, 3, 224, 224) ) pixel_values[start_index:end_index, :curr_batch_max_num_images] = ( batch.pixel_values 
) if image_attention_mask is None: image_attention_mask = batch.image_attention_mask.new_zeros( ( total_batch_size, max_input_length + padding_right_offset, max_num_images, ) ) # We need to slice the attention mask to remove padding from previous steps # and to remove unused allocated space left_offset = max_input_length - batch.max_input_length batch_left_offset = ( batch.attention_mask.shape[1] - batch.max_input_length - batch.padding_right_offset ) attention_mask[ start_index:end_index, left_offset:-padding_right_offset, ] = batch.attention_mask[ :, batch_left_offset : -batch.padding_right_offset, ] image_attention_mask[ start_index:end_index, left_offset:-padding_right_offset, :curr_batch_max_num_images, ] = batch.image_attention_mask[ :, batch_left_offset : -batch.padding_right_offset, : ] # Create empty tensor # position_ids is always of shape [batch_size, 1] if position_ids is None: position_ids = batch.position_ids.new_empty((total_batch_size, 1)) position_ids[start_index:end_index] = batch.position_ids # Shenanigans to get dimensions because BLOOM outputs a past with a different shape # BLOOM Keys: [batch_size * num_heads, head_dim, seq_length] # BLOOM Values: [batch_size * num_heads, seq_length, head_dim] # And ensure that we can update tensors in-place if isinstance(batch.past_key_values[0], tuple): batch.past_key_values = [ [t.view(len(batch), -1, *t.shape[-2:]) for t in layer] for layer in batch.past_key_values ] elif len(batch.past_key_values[0][0].shape) == 3: for layer in batch.past_key_values: for k, t in enumerate(layer): layer[k] = t.view(len(batch), -1, *t.shape[-2:]) # Add eventual padding tokens that were added while concatenating max_tokens += batch.max_tokens + ( max_input_length - batch.max_input_length ) * len(batch) start_index = end_index first_past_kvs = batches[0].past_key_values _, num_heads, padded_sequence_length, head_dim = first_past_kvs[0][1].shape padded_past_values_shape = ( total_batch_size, num_heads, max_input_length - 1, head_dim, ) if batches[0].keys_head_dim_last: padded_past_keys_shape = padded_past_values_shape else: # seq_length is last for BLOOM padded_past_keys_shape = ( total_batch_size, num_heads, head_dim, max_input_length - 1, ) # Iterate over attention layers # Concatenate past key values layer by layer to allow incremental garbage collection for j in range(len(first_past_kvs)): padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape) start_index = 0 for batch in batches: past_keys = batch.past_key_values[j][0] # Clear reference to the original tensor batch.past_key_values[j][0] = None # Slicing end index for this batch end_index = start_index + len(batch) # We slice the keys to remove the padding from previous batches past_seq_len = batch.max_input_length - 1 if batch.keys_head_dim_last: padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = ( past_keys[:, :, -past_seq_len:, :] ) else: # BLOOM case padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = ( past_keys[:, :, :, -past_seq_len:] ) del past_keys start_index = end_index padded_past_values = first_past_kvs[j][1].new_zeros( padded_past_values_shape ) start_index = 0 for batch in batches: past_values = batch.past_key_values[j][1] # Clear reference to the original tensor batch.past_key_values[j][1] = None # Slicing end index for this batch end_index = start_index + len(batch) # We slice the past values to remove the padding from previous batches past_seq_len = batch.max_input_length - 1 padded_past_values[start_index:end_index, :, -past_seq_len:, 
:] = ( past_values[:, :, -past_seq_len:, :] ) del past_values # Update values start_index = end_index past_key_values.append([padded_past_keys, padded_past_values]) return cls( batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=past_key_values, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens, ) def __len__(self): return len(self.requests) class IdeficsCausalLM(Model): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, speculator: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.quantize = quantize self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") # 9b seems to work correctly enough in float16, but 80b seems # to be really saturating for f16. dtype = torch.float16 if dtype is None else dtype elif SYSTEM == "ipex": if hasattr(torch, "xpu") and torch.xpu.is_available(): device = torch.device(f"xpu:{rank}") dtype = torch.float16 if dtype is None else dtype else: device = torch.device("cpu") # Float16 doesn't exist on target. dtype = torch.bfloat16 if dtype is None else dtype else: device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype self.device, self.dtype = device, dtype config = AutoConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code, ) config.quantize = quantize config.speculator = speculator config.vision_config.quantize = quantize tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) self.processor = AutoProcessor.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) weights_loader = get_loader( quantize=quantize, model_id=model_id, revision=revision ) torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights( filenames, device=device, dtype=dtype, process_group=self.process_group, weights_loader=weights_loader, ) model = IdeficsForVisionText2Text(config, weights) self.config = config torch.distributed.barrier(group=self.process_group) super().__init__( model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size, ) @property def batch_type(self) -> Type[IdeficsCausalLMBatch]: return IdeficsCausalLMBatch def forward( self, input_ids, attention_mask, position_ids, pixel_values, image_hidden_states, image_attention_mask, past_key_values: Optional = None, ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: # Model Forward kwargs = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "image_hidden_states": image_hidden_states, "image_attention_mask": image_attention_mask, "past_key_values": past_key_values, "use_cache": 
True, "return_dict": True, } if self.has_position_ids: kwargs["position_ids"] = position_ids outputs, speculative_logits = self.model.forward(**kwargs) return ( outputs.logits, speculative_logits, outputs.past_key_values, outputs.image_hidden_states, ) @tracer.start_as_current_span("generate_token") def generate_token( self, batch: IdeficsCausalLMBatch ) -> Tuple[List[Generation], Optional[IdeficsCausalLMBatch], Tuple[int, int]]: start = time.time_ns() # slice the attention mask to the correct shape attention_mask = batch.attention_mask[:, : -batch.padding_right_offset] if batch.image_attention_mask is None: image_attention_mask = None else: if batch.input_ids.size(1) == 1: # THIS is a hack: when calling idefics.generate, the first time, we need the whole image_attention_mask (size bs x max_seq_len x max_num_images), # but the subsequent times, we only need the last attention mask along the `max_seq_len` dimension # this is due to the nature IDEFICS: it's an encoder decoder, and so when decoding, only the currently generated # token need to attend to the encoder hidden states (i.e. the vision encoder) # Also see seq2seq_lm.Seq2SeqLM.generate_token which has roughly the same logic image_attention_mask = batch.image_attention_mask[ :, -(batch.padding_right_offset + 1) ].unsqueeze(1) else: image_attention_mask = batch.image_attention_mask[ :, : -batch.padding_right_offset ] logits, speculative_logits, past, image_hidden_states = self.forward( input_ids=batch.input_ids, attention_mask=attention_mask, position_ids=batch.position_ids, pixel_values=batch.pixel_values, image_hidden_states=batch.image_hidden_states, image_attention_mask=image_attention_mask, past_key_values=batch.past_key_values, ) # Hardcoded remove image tokens logits[:, 32000:32001] = torch.finfo(logits.dtype).min start_decode = time.time_ns() # Results generations: List[Generation] = [] stopped = True # Zipped iterator iterator = zip( batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids, ) # For each member of the batch for i, ( request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids, ) in enumerate(iterator): # Select next token next_token_id, logprobs = next_token_chooser( all_input_ids.view(1, -1), logits[-1:, :] ) # Append next token to all tokens all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 # Generated token next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() next_token_text, prefix_offset, read_offset = self.decode_token( all_input_ids[:, 0], prefix_offset, read_offset ) # Evaluate stopping criteria stop, reason = stopping_criteria( next_token_id_squeezed, next_token_text, ) if not stop: stopped = False # Shard generations # All generations will be appended in the rust sharded client if i % self.world_size == self.rank: if stop: # Decode generated tokens output_text, _, _ = self.decode_token( all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True, ) # Get seed if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText( output_text, stopping_criteria.current_tokens, reason, seed ) else: generated_text = None # Prefill if stopping_criteria.current_tokens == 1 and 
request.prefill_logprobs: # Remove generated token to only have prefill and add nan for first prompt token prefill_logprobs = [float("nan")] + torch.log_softmax( logits, -1 ).gather(1, all_input_ids[1:]).squeeze(1)[ -new_input_length:-1 ].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode( prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) prefill_tokens = Tokens( prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[], ) else: prefill_tokens = None top_tokens = None generation = Generation( request.id, prefill_tokens, Tokens( [next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids], ), generated_text, top_tokens, ) generations.append(generation) # Update values batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar( next_token_id_squeezed.item() ) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) # We finished all generations in the batch; there is no next batch if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, None, (forward_ns, decode_ns) # Slice unused values from prefill batch.input_ids = batch.input_ids[:, :1] # Update attention_mask as we added a new token to input_ids batch.attention_mask[:, -batch.padding_right_offset] = 1 batch.image_attention_mask[:, -batch.padding_right_offset, :] = ( batch.image_attention_mask[:, -(batch.padding_right_offset + 1), :] ) # Decrease right offset batch.padding_right_offset -= 1 # Update position_ids batch.position_ids = batch.position_ids[:, -1:] + 1 # Update past key values batch.past_key_values = past batch.image_hidden_states = image_hidden_states forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, batch, (forward_ns, decode_ns)
text-generation-inference/server/text_generation_server/models/idefics_causal_lm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/idefics_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 17112 }
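One detail worth spelling out from IdeficsCausalLMBatch.from_pb_processor above is how position_ids are derived from a left-padded attention mask: a cumulative sum minus one, with padded positions overwritten afterwards. A tiny sketch with a made-up mask:

import torch

attention_mask = torch.tensor(
    [
        [0, 0, 1, 1, 1],  # request with 3 real tokens, left-padded
        [1, 1, 1, 1, 1],  # request with 5 real tokens
    ]
)

position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)

print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])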
283
nodeLinker: node-modules
npmAuditRegistry: 'https://registry.npmjs.org'
yarnPath: .yarn/releases/yarn-3.5.1.cjs
tokenizers/bindings/node/.yarnrc.yml/0
{ "file_path": "tokenizers/bindings/node/.yarnrc.yml", "repo_id": "tokenizers", "token_count": 53 }
284
/* eslint-disable @typescript-eslint/no-empty-function */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { BPE, Unigram, WordPiece } from '../../' const MOCKS_DIR = __dirname + '/__mocks__' describe('WordPiece', () => { describe('fromFile', () => { it('throws if called with only one argument', () => { expect(() => (WordPiece as any).fromFile()).toThrow( 'Failed to convert JavaScript value `Undefined` into rust type `String`', ) }) it('throws if called with 2 arguments without a callback as third argument', () => { expect(() => (WordPiece as any).fromFile({})).toThrow( 'Failed to convert JavaScript value `Object {}` into rust type `String`', ) }) it('has its callback called with the loaded model', async () => { const model = await WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`) expect(model).toBeDefined() }) }) }) describe('BPE', () => { describe('fromFile', () => { it('has its callback called with the loaded model', async () => { const model = await BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`) expect(model).toBeDefined() }) it('has its callback called with the loaded model', async () => { const model = await BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`, {}) expect(model).toBeDefined() }) }) describe('When initialized from memory', () => { it('returns the loaded Model', () => { const bpe = BPE.init({ a: 0, b: 1, ab: 2 }, [['a', 'b']]) // expect(bpe.constructor.name).toEqual("Model"); expect(bpe.constructor.name).toEqual('BPE') }) }) }) describe('Unigram', () => { it('can be initialized from memory', () => { const unigram = Unigram.init( [ ['<unk>', 0], ['Hello', -1], ['there', -2], ], { unkId: 0, }, ) expect(unigram.constructor.name).toEqual('Unigram') }) })
tokenizers/bindings/node/lib/bindings/models.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/models.test.ts", "repo_id": "tokenizers", "token_count": 818 }
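For comparison, the in-memory initialization exercised by the BPE test above looks essentially the same through the Python bindings. A small sketch using the toy vocab from the test; the expected output is what BPE with a single a+b merge should produce:

from tokenizers import Tokenizer
from tokenizers.models import BPE

bpe = BPE(vocab={"a": 0, "b": 1, "ab": 2}, merges=[("a", "b")])
tokenizer = Tokenizer(bpe)

encoding = tokenizer.encode("ab")
print(encoding.tokens)  # ['ab'] -- "a" and "b" are merged by the single BPE merge
print(encoding.ids)     # [2]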
285
# `tokenizers-linux-arm-gnueabihf`

This is the **armv7-unknown-linux-gnueabihf** binary for `tokenizers`
tokenizers/bindings/node/npm/linux-arm-gnueabihf/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm-gnueabihf/README.md", "repo_id": "tokenizers", "token_count": 42 }

286
{ "name": "tokenizers", "version": "0.15.3-dev0", "repository": { "type": "git", "url": "git+https://github.com/huggingface/tokenizers.git" }, "bugs": { "url": "https://github.com/huggingface/tokenizers/issues" }, "homepage": "https://github.com/huggingface/tokenizers/tree/master/bindings/node", "author": "Anthony MOI <m.anthony.moi@gmail.com>", "license": "Apache-2.0", "description": "Provides an implementation of today's most used tokenizers, with a focus on performances and versatility.", "files": [ "index.d.ts", "index.js" ], "napi": { "name": "tokenizers", "triples": { "defaults": true, "additional": [ "x86_64-unknown-linux-musl", "aarch64-unknown-linux-gnu", "i686-pc-windows-msvc", "armv7-unknown-linux-gnueabihf", "aarch64-apple-darwin", "aarch64-linux-android", "x86_64-unknown-freebsd", "aarch64-unknown-linux-musl", "aarch64-pc-windows-msvc", "armv7-linux-androideabi" ] } }, "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "scripts": { "artifacts": "napi artifacts", "bench": "node -r @swc-node/register benchmark/bench.ts", "build": "napi build --platform --release --pipe \"prettier -w\"", "build:debug": "napi build --platform --pipe \"prettier -w\"", "format": "run-p format:prettier format:rs format:toml", "format:prettier": "prettier . -w", "format:toml": "taplo format", "format:rs": "cargo fmt", "lint": "eslint . -c ./.eslintrc.yml", "prepublishOnly": "napi prepublish -t npm", "test": "jest", "version": "napi version" }, "devDependencies": { "@napi-rs/cli": "^2.14.6", "@swc-node/register": "^1.5.5", "@swc/core": "^1.3.32", "@taplo/cli": "^0.5.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.50.0", "@typescript-eslint/parser": "^5.50.0", "ava": "^5.1.1", "benny": "^3.7.1", "chalk": "^5.2.0", "eslint": "^8.33.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-prettier": "^4.2.1", "husky": "^8.0.3", "jest": "^29.5.0", "lint-staged": "^13.1.0", "npm-run-all": "^4.1.5", "prettier": "^2.8.3", "ts-jest": "^29.1.0", "typescript": "^5.0.0" }, "lint-staged": { "*.@(js|ts|tsx)": [ "eslint -c .eslintrc.yml --fix" ], "*.@(js|ts|tsx|yml|yaml|md|json)": [ "prettier --write" ], "*.toml": [ "taplo format" ] }, "ava": { "require": [ "@swc-node/register" ], "extensions": [ "ts" ], "timeout": "2m", "workerThreads": false, "environmentVariables": { "TS_NODE_PROJECT": "./tsconfig.json" } }, "prettier": { "printWidth": 120, "semi": false, "trailingComma": "all", "singleQuote": true, "arrowParens": "always" }, "packageManager": "yarn@3.5.1" }
tokenizers/bindings/node/package.json/0
{ "file_path": "tokenizers/bindings/node/package.json", "repo_id": "tokenizers", "token_count": 1532 }
287
{
  "compilerOptions": {
    "target": "ES2018",
    "strict": true,
    "moduleResolution": "node",
    "module": "CommonJS",
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "esModuleInterop": true,
    "allowSyntheticDefaultImports": true
  },
  "include": ["."],
  "exclude": ["node_modules"]
}
tokenizers/bindings/node/tsconfig.json/0
{ "file_path": "tokenizers/bindings/node/tsconfig.json", "repo_id": "tokenizers", "token_count": 129 }
288
import datasets

from tokenizers import Tokenizer, models, normalizers, pre_tokenizers

# Build a tokenizer
bpe_tokenizer = Tokenizer(models.BPE())
bpe_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
bpe_tokenizer.normalizer = normalizers.Lowercase()

# Initialize a dataset
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train")

# Build an iterator over this dataset
def batch_iterator():
    batch_size = 1000
    for batch in dataset.iter(batch_size=batch_size):
        yield batch["text"]

# And finally train
bpe_tokenizer.train_from_iterator(batch_iterator(), length=len(dataset))
tokenizers/bindings/python/examples/train_with_datasets.py/0
{ "file_path": "tokenizers/bindings/python/examples/train_with_datasets.py", "repo_id": "tokenizers", "token_count": 207 }
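Continuing the training script above, a short sketch of the typical next steps: encode a sentence with the trained tokenizer and serialize it to disk. It reuses the bpe_tokenizer variable from the script, and the output file name is arbitrary.

from tokenizers import Tokenizer

output = bpe_tokenizer.encode("Hello, how are you?")
print(output.tokens)  # lowercased, whitespace pre-tokenized pieces
print(output.ids)

bpe_tokenizer.save("tokenizer.json")
reloaded = Tokenizer.from_file("tokenizer.json")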
289
# Generated content DO NOT EDIT class Normalizer: """ Base class for all normalizers This class is not supposed to be instantiated directly. Instead, any implementation of a Normalizer will return an instance of this class when instantiated. """ def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class BertNormalizer(Normalizer): """ BertNormalizer Takes care of normalizing raw text before giving it to a Bert model. This includes cleaning the text, handling accents, chinese chars and lowercasing Args: clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to clean the text, by removing any control characters and replacing all whitespaces by the classic one. handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to handle chinese chars by putting spaces around them. strip_accents (:obj:`bool`, `optional`): Whether to strip all accents. If this option is not specified (ie == None), then it will be determined by the value for `lowercase` (as in the original Bert). lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase. """ def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class ByteLevel(Normalizer): """ Bytelevel Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Lowercase(Normalizer): """ Lowercase Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFC(Normalizer): """ NFC Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFD(Normalizer): """ NFD Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFKC(Normalizer): """ NFKC Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFKD(Normalizer): """ NFKD Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Nmt(Normalizer): """ Nmt normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Precompiled(Normalizer): """ Precompiled normalizer Don't use manually it is used for compatibility for SentencePiece. """ def __init__(self, precompiled_charsmap): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Prepend(Normalizer): """ Prepend normalizer """ def __init__(self, prepend): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Replace(Normalizer): """ Replace normalizer """ def __init__(self, pattern, content): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Sequence(Normalizer): """ Allows concatenating multiple other Normalizer as a Sequence. All the normalizers run in sequence in the given order Args: normalizers (:obj:`List[Normalizer]`): A list of Normalizer to be run as a sequence """ def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Strip(Normalizer): """ Strip normalizer """ def __init__(self, left=True, right=True): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class StripAccents(Normalizer): """ StripAccents normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass
tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi", "repo_id": "tokenizers", "token_count": 8593 }
290
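The stub file above only declares the normalizer API, so a short usage sketch follows. It assumes the public tokenizers package; the sample input and the expected output are illustrative. The sketch exercises normalize_str (visualization only, no alignment tracking) and shows how a Sequence chains normalizers in the given order.

from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents, Lowercase

# Chain several normalizers; they run in the given order, as documented for Sequence.
normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])

# normalize_str only shows the effect of the normalizer; it does not keep
# alignment information (normalize() on a NormalizedString does).
print(normalizer.normalize_str("Héllò hôw are ü?"))
# expected (illustrative): "hello how are u?"

# Attached to a tokenizer, the normalizer runs automatically on every encode:
#   tokenizer.normalizer = normalizer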
use std::sync::{Arc, RwLock}; use crate::pre_tokenizers::from_string; use crate::tokenizer::PyTokenizer; use crate::utils::PyPattern; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::decoders::bpe::BPEDecoder; use tk::decoders::byte_fallback::ByteFallback; use tk::decoders::byte_level::ByteLevel; use tk::decoders::ctc::CTC; use tk::decoders::fuse::Fuse; use tk::decoders::metaspace::{Metaspace, PrependScheme}; use tk::decoders::sequence::Sequence; use tk::decoders::strip::Strip; use tk::decoders::wordpiece::WordPiece; use tk::decoders::DecoderWrapper; use tk::normalizers::replace::Replace; use tk::Decoder; use tokenizers as tk; use super::error::ToPyResult; /// Base class for all decoders /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a Decoder will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.decoders", name = "Decoder", subclass)] #[derive(Clone, Deserialize, Serialize)] #[serde(transparent)] pub struct PyDecoder { pub(crate) decoder: PyDecoderWrapper, } impl PyDecoder { pub(crate) fn new(decoder: PyDecoderWrapper) -> Self { PyDecoder { decoder } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.decoder { PyDecoderWrapper::Custom(_) => Py::new(py, base)?.into_pyobject(py)?.into_any().into(), PyDecoderWrapper::Wrapped(inner) => match &*inner.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => Py::new(py, (PyMetaspaceDec {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::WordPiece(_) => Py::new(py, (PyWordPieceDec {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::ByteFallback(_) => Py::new(py, (PyByteFallbackDec {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::Strip(_) => Py::new(py, (PyStrip {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::Fuse(_) => Py::new(py, (PyFuseDec {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::ByteLevel(_) => Py::new(py, (PyByteLevelDec {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::Replace(_) => Py::new(py, (PyReplaceDec {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::BPE(_) => Py::new(py, (PyBPEDecoder {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::CTC(_) => Py::new(py, (PyCTCDecoder {}, base))? .into_pyobject(py)? .into_any() .into(), DecoderWrapper::Sequence(_) => Py::new(py, (PySequenceDecoder {}, base))? .into_pyobject(py)? 
.into_any() .into(), }, }) } } impl Decoder for PyDecoder { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { self.decoder.decode_chain(tokens) } } #[pymethods] impl PyDecoder { #[staticmethod] fn custom(decoder: PyObject) -> Self { let decoder = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(decoder)))); PyDecoder::new(decoder) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.decoder).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Decoder: {e}" )) })?; Ok(PyBytes::new(py, data.as_bytes()).into()) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&[u8]>(py) { Ok(s) => { self.decoder = serde_json::from_slice(s).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Decoder: {e}" )) })?; Ok(()) } Err(e) => Err(e), } } /// Decode the given list of tokens to a final string /// /// Args: /// tokens (:obj:`List[str]`): /// The list of tokens to decode /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(text_signature = "(self, tokens)")] fn decode(&self, tokens: Vec<String>) -> PyResult<String> { ToPyResult(self.decoder.decode(tokens)).into() } fn __repr__(&self) -> PyResult<String> { crate::utils::serde_pyo3::repr(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } fn __str__(&self) -> PyResult<String> { crate::utils::serde_pyo3::to_string(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref dec) = *wrap.read().unwrap() { dec.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name($value); } } }}; } /// ByteLevel Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteLevel")] pub struct PyByteLevelDec {} #[pymethods] impl PyByteLevelDec { #[new] #[pyo3(signature = (**_kwargs), text_signature = "(self)")] fn new(_kwargs: Option<&Bound<'_, PyDict>>) -> (Self, PyDecoder) { (PyByteLevelDec {}, ByteLevel::default().into()) } } /// Replace Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. 
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Replace")] pub struct PyReplaceDec {} #[pymethods] impl PyReplaceDec { #[new] #[pyo3(text_signature = "(self, pattern, content)")] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyDecoder)> { Ok(( PyReplaceDec {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } /// WordPiece Decoder /// /// Args: /// prefix (:obj:`str`, `optional`, defaults to :obj:`##`): /// The prefix to use for subwords that are not a beginning-of-word /// /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, /// and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "WordPiece")] pub struct PyWordPieceDec {} #[pymethods] impl PyWordPieceDec { #[getter] fn get_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, prefix.clone()) } #[setter] fn set_prefix(self_: PyRef<Self>, prefix: String) { setter!(self_, WordPiece, prefix, prefix); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, WordPiece, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, WordPiece, cleanup, cleanup); } #[new] #[pyo3(signature = (prefix = String::from("##"), cleanup = true), text_signature = "(self, prefix=\"##\", cleanup=True)")] fn new(prefix: String, cleanup: bool) -> (Self, PyDecoder) { (PyWordPieceDec {}, WordPiece::new(prefix, cleanup).into()) } } /// ByteFallback Decoder /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. If the tokens /// cannot be decoded you will get � instead for each inconvertible byte token /// #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteFallback")] pub struct PyByteFallbackDec {} #[pymethods] impl PyByteFallbackDec { #[new] #[pyo3(signature = (), text_signature = "(self)")] fn new() -> (Self, PyDecoder) { (PyByteFallbackDec {}, ByteFallback::new().into()) } } /// Fuse Decoder /// Fuse simply fuses every token into a single string. 
/// This is the last step of decoding, this decoder exists only if /// there is need to add other decoders *after* the fusion #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Fuse")] pub struct PyFuseDec {} #[pymethods] impl PyFuseDec { #[new] #[pyo3(signature = (), text_signature = "(self)")] fn new() -> (Self, PyDecoder) { (PyFuseDec {}, Fuse::new().into()) } } /// Strip normalizer /// Strips n left characters of each token, or n right characters of each token #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Strip")] pub struct PyStrip {} #[pymethods] impl PyStrip { #[getter] fn get_start(self_: PyRef<Self>) -> usize { getter!(self_, Strip, start) } #[setter] fn set_start(self_: PyRef<Self>, start: usize) { setter!(self_, Strip, start, start) } #[getter] fn get_stop(self_: PyRef<Self>) -> usize { getter!(self_, Strip, stop) } #[setter] fn set_stop(self_: PyRef<Self>, stop: usize) { setter!(self_, Strip, stop, stop) } #[getter] fn get_content(self_: PyRef<Self>) -> char { getter!(self_, Strip, content) } #[setter] fn set_content(self_: PyRef<Self>, content: char) { setter!(self_, Strip, content, content) } #[new] #[pyo3(signature = (content=' ', left=0, right=0), text_signature = "(self, content, left=0, right=0)")] fn new(content: char, left: usize, right: usize) -> (Self, PyDecoder) { (PyStrip {}, Strip::new(content, left, right).into()) } } /// Metaspace Decoder /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. /// Choices: "always", "never", "first". First means the space is only added on the first /// token (relevant when special tokens are used or other pre_tokenizer are used). 
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Metaspace")] pub struct PyMetaspaceDec {} #[pymethods] impl PyMetaspaceDec { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: char) { setter!(self_, Metaspace, @set_replacement, replacement); } #[getter] fn get_split(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, get_split()) } #[setter] fn set_split(self_: PyRef<Self>, split: bool) { setter!(self_, Metaspace, @set_split, split); } #[getter] fn get_prepend_scheme(self_: PyRef<Self>) -> String { // Assuming Metaspace has a method to get the prepend_scheme as a string let scheme: PrependScheme = getter!(self_, Metaspace, get_prepend_scheme()); match scheme { PrependScheme::First => "first", PrependScheme::Never => "never", PrependScheme::Always => "always", } .to_string() } #[setter] fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> { let scheme = from_string(prepend_scheme)?; setter!(self_, Metaspace, @set_prepend_scheme, scheme); Ok(()) } #[new] #[pyo3(signature = (replacement = '▁', prepend_scheme = String::from("always"), split = true), text_signature = "(self, replacement = \"▁\", prepend_scheme = \"always\", split = True)")] fn new(replacement: char, prepend_scheme: String, split: bool) -> PyResult<(Self, PyDecoder)> { let prepend_scheme = from_string(prepend_scheme)?; Ok(( PyMetaspaceDec {}, Metaspace::new(replacement, prepend_scheme, split).into(), )) } } /// BPEDecoder Decoder /// /// Args: /// suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`): /// The suffix that was used to characterize an end-of-word. This suffix will /// be replaced by whitespaces during the decoding #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "BPEDecoder")] pub struct PyBPEDecoder {} #[pymethods] impl PyBPEDecoder { #[getter] fn get_suffix(self_: PyRef<Self>) -> String { getter!(self_, BPE, suffix.clone()) } #[setter] fn set_suffix(self_: PyRef<Self>, suffix: String) { setter!(self_, BPE, suffix, suffix); } #[new] #[pyo3(signature = (suffix = String::from("</w>")), text_signature = "(self, suffix=\"</w>\")")] fn new(suffix: String) -> (Self, PyDecoder) { (PyBPEDecoder {}, BPEDecoder::new(suffix).into()) } } /// CTC Decoder /// /// Args: /// pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`): /// The pad token used by CTC to delimit a new token. /// word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): /// The word delimiter token. It will be replaced by a <space> /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. /// Mainly spaces before punctuation, and some abbreviated english forms. 
#[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "CTC")] pub struct PyCTCDecoder {} #[pymethods] impl PyCTCDecoder { #[getter] fn get_pad_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, pad_token.clone()) } #[setter] fn set_pad_token(self_: PyRef<Self>, pad_token: String) { setter!(self_, CTC, pad_token, pad_token); } #[getter] fn get_word_delimiter_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, word_delimiter_token.clone()) } #[setter] fn set_word_delimiter_token(self_: PyRef<Self>, word_delimiter_token: String) { setter!(self_, CTC, word_delimiter_token, word_delimiter_token); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, CTC, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, CTC, cleanup, cleanup); } #[new] #[pyo3(signature = ( pad_token = String::from("<pad>"), word_delimiter_token = String::from("|"), cleanup = true ), text_signature = "(self, pad_token=\"<pad>\", word_delimiter_token=\"|\", cleanup=True)")] fn new(pad_token: String, word_delimiter_token: String, cleanup: bool) -> (Self, PyDecoder) { ( PyCTCDecoder {}, CTC::new(pad_token, word_delimiter_token, cleanup).into(), ) } } /// Sequence Decoder /// /// Args: /// decoders (:obj:`List[Decoder]`) /// The decoders that need to be chained #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name="Sequence")] pub struct PySequenceDecoder {} #[pymethods] impl PySequenceDecoder { #[new] #[pyo3(signature = (decoders_py), text_signature = "(self, decoders)")] fn new(decoders_py: &Bound<'_, PyList>) -> PyResult<(Self, PyDecoder)> { let mut decoders: Vec<DecoderWrapper> = Vec::with_capacity(decoders_py.len()); for decoder_py in decoders_py.iter() { let decoder: PyRef<PyDecoder> = decoder_py.extract()?; let decoder = match &decoder.decoder { PyDecoderWrapper::Wrapped(inner) => inner, PyDecoderWrapper::Custom(_) => unimplemented!(), }; decoders.push(decoder.read().unwrap().clone()); } Ok((PySequenceDecoder {}, Sequence::new(decoders).into())) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [PyList::empty(py)]) } } pub(crate) struct CustomDecoder { inner: PyObject, } impl CustomDecoder { pub(crate) fn new(inner: PyObject) -> Self { CustomDecoder { inner } } } impl Decoder for CustomDecoder { fn decode(&self, tokens: Vec<String>) -> tk::Result<String> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode", (tokens,), None)? .extract(py)?; Ok(decoded) }) } fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode_chain", (tokens,), None)? 
.extract(py)?; Ok(decoded) }) } } impl Serialize for CustomDecoder { fn serialize<S>(&self, _serializer: S) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PyDecoder cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomDecoder { fn deserialize<D>(_deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { Err(D::Error::custom("PyDecoder cannot be deserialized")) } } #[derive(Clone, Deserialize, Serialize)] #[serde(untagged)] pub(crate) enum PyDecoderWrapper { Custom(Arc<RwLock<CustomDecoder>>), Wrapped(Arc<RwLock<DecoderWrapper>>), } impl<I> From<I> for PyDecoderWrapper where I: Into<DecoderWrapper>, { fn from(norm: I) -> Self { PyDecoderWrapper::Wrapped(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyDecoder where I: Into<DecoderWrapper>, { fn from(dec: I) -> Self { PyDecoder { decoder: dec.into().into(), } } } impl Decoder for PyDecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { match self { PyDecoderWrapper::Wrapped(inner) => inner.read().unwrap().decode_chain(tokens), PyDecoderWrapper::Custom(inner) => inner.read().unwrap().decode_chain(tokens), } } } /// Decoders Module #[pymodule] pub fn decoders(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::<PyDecoder>()?; m.add_class::<PyByteLevelDec>()?; m.add_class::<PyReplaceDec>()?; m.add_class::<PyWordPieceDec>()?; m.add_class::<PyByteFallbackDec>()?; m.add_class::<PyFuseDec>()?; m.add_class::<PyStrip>()?; m.add_class::<PyMetaspaceDec>()?; m.add_class::<PyBPEDecoder>()?; m.add_class::<PyCTCDecoder>()?; m.add_class::<PySequenceDecoder>()?; m.add_class::<PyDecodeStream>()?; Ok(()) } /// Class needed for streaming decode /// #[pyclass(module = "tokenizers.decoders", name = "DecodeStream")] #[derive(Clone)] pub struct PyDecodeStream { /// Regular decode option that is kept throughout. skip_special_tokens: bool, /// A temporary buffer of the necessary token_ids needed /// to produce valid string chunks. /// This typically contains 3 parts: /// - read /// - prefix /// - rest /// /// Read is the bit necessary to surround the prefix /// so decoding the whole ids produces a valid prefix. 
/// Prefix is the previously produced string, kept around to trim off of /// the next valid chunk ids: Vec<u32>, /// The previously returned chunk that needs to be discarded from the /// decoding of the current ids to produce the next chunk prefix: String, /// The index within the ids corresponding to the prefix so we can drain /// correctly prefix_index: usize, } #[pymethods] impl PyDecodeStream { #[new] #[pyo3(signature = (skip_special_tokens), text_signature = "(self, skip_special_tokens)")] fn new(skip_special_tokens: bool) -> Self { PyDecodeStream { skip_special_tokens, ids: vec![], prefix: "".to_string(), prefix_index: 0, } } #[pyo3(signature = (tokenizer, id), text_signature = "(self, tokenizer, id)")] fn step(&mut self, tokenizer: &PyTokenizer, id: u32) -> PyResult<Option<String>> { ToPyResult(tk::tokenizer::step_decode_stream( &tokenizer.tokenizer, id, self.skip_special_tokens, &mut self.ids, &mut self.prefix, &mut self.prefix_index, )) .into() } } #[cfg(test)] mod test { use std::sync::{Arc, RwLock}; use pyo3::prelude::*; use tk::decoders::metaspace::Metaspace; use tk::decoders::DecoderWrapper; use crate::decoders::{CustomDecoder, PyDecoder, PyDecoderWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_dec = PyDecoder::new(Metaspace::default().into()); let py_meta = py_dec.get_as_subtype(py).unwrap(); assert_eq!("Metaspace", py_meta.bind(py).get_type().qualname().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyDecoderWrapper = Metaspace::default().into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = DecoderWrapper::Metaspace(Metaspace::default()); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_dec: PyDecoder = serde_json::from_str(&rs_ser).unwrap(); match py_dec.decoder { PyDecoderWrapper::Wrapped(msp) => match *msp.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => {} _ => panic!("Expected Metaspace"), }, _ => panic!("Expected wrapped, not custom."), } let obj = Python::with_gil(|py| { let py_msp = PyDecoder::new(Metaspace::default().into()); let obj: PyObject = Py::new(py, py_msp) .unwrap() .into_pyobject(py) .unwrap() .into_any() .into(); obj }); let py_seq = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(obj)))); assert!(serde_json::to_string(&py_seq).is_err()); } }
tokenizers/bindings/python/src/decoders.rs/0
{ "file_path": "tokenizers/bindings/python/src/decoders.rs", "repo_id": "tokenizers", "token_count": 11001 }
291
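decoders.rs above only defines the Rust/PyO3 bindings, so here is a hedged sketch of using the same classes from Python. The checkpoint name is purely illustrative, DecodeStream is assumed to be available in the installed tokenizers release (it is the class bound at the end of the file above), and the expected outputs are indicative.

from tokenizers import Tokenizer, decoders
from tokenizers.decoders import DecodeStream

# Stand-alone use: fuse WordPiece sub-tokens back into text.
wordpiece = decoders.WordPiece(prefix="##", cleanup=True)
print(wordpiece.decode(["Hel", "##lo", "how", "are", "you"]))  # -> "Hello how are you"

# Attached to a tokenizer, the decoder is applied by Tokenizer.decode().
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
tokenizer.decoder = wordpiece
ids = tokenizer.encode("Hello how are you").ids
print(tokenizer.decode(ids))

# Streaming decode via DecodeStream: step() returns None until enough ids have
# been seen to emit a valid chunk, which avoids re-decoding the full sequence.
stream = DecodeStream(skip_special_tokens=True)
for token_id in ids:
    chunk = stream.step(tokenizer, token_id)
    if chunk is not None:
        print(chunk, end="")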
use serde::de::value::Error; use serde::{ser, Serialize}; type Result<T> = ::std::result::Result<T, Error>; pub struct Serializer { // This string starts empty and JSON is appended as values are serialized. output: String, /// Each levels remembers its own number of elements num_elements: Vec<usize>, max_elements: usize, level: usize, max_depth: usize, /// Maximum string representation /// Useful to ellipsis precompiled_charmap max_string: usize, } // By convention, the public API of a Serde serializer is one or more `to_abc` // functions such as `to_string`, `to_bytes`, or `to_writer` depending on what // Rust types the serializer is able to produce as output. // // This basic serializer supports only `to_string`. pub fn to_string<T>(value: &T) -> Result<String> where T: Serialize, { let max_depth = 20; let max_elements = 6; let max_string = 100; let mut serializer = Serializer { output: String::new(), level: 0, max_depth, max_elements, num_elements: vec![0; max_depth], max_string, }; value.serialize(&mut serializer)?; Ok(serializer.output) } pub fn repr<T>(value: &T) -> Result<String> where T: Serialize, { let max_depth = 200; let max_string = usize::MAX; let mut serializer = Serializer { output: String::new(), level: 0, max_depth, max_elements: 100, num_elements: vec![0; max_depth], max_string, }; value.serialize(&mut serializer)?; Ok(serializer.output) } impl ser::Serializer for &mut Serializer { // The output type produced by this `Serializer` during successful // serialization. Most serializers that produce text or binary output should // set `Ok = ()` and serialize into an `io::Write` or buffer contained // within the `Serializer` instance, as happens here. Serializers that build // in-memory data structures may be simplified by using `Ok` to propagate // the data structure around. type Ok = (); // The error type when some error occurs during serialization. type Error = Error; // Associated types for keeping track of additional state while serializing // compound data structures like sequences and maps. In this case no // additional state is required beyond what is already stored in the // Serializer struct. type SerializeSeq = Self; type SerializeTuple = Self; type SerializeTupleStruct = Self; type SerializeTupleVariant = Self; type SerializeMap = Self; type SerializeStruct = Self; type SerializeStructVariant = Self; // Here we go with the simple methods. The following 12 methods receive one // of the primitive types of the data model and map it to JSON by appending // into the output string. fn serialize_bool(self, v: bool) -> Result<()> { self.output += if v { "True" } else { "False" }; Ok(()) } // JSON does not distinguish between different sizes of integers, so all // signed integers will be serialized the same and all unsigned integers // will be serialized the same. Other formats, especially compact binary // formats, may need independent logic for the different sizes. fn serialize_i8(self, v: i8) -> Result<()> { self.serialize_i64(i64::from(v)) } fn serialize_i16(self, v: i16) -> Result<()> { self.serialize_i64(i64::from(v)) } fn serialize_i32(self, v: i32) -> Result<()> { self.serialize_i64(i64::from(v)) } // Not particularly efficient but this is example code anyway. A more // performant approach would be to use the `itoa` crate. 
fn serialize_i64(self, v: i64) -> Result<()> { self.output += &v.to_string(); Ok(()) } fn serialize_u8(self, v: u8) -> Result<()> { self.serialize_u64(u64::from(v)) } fn serialize_u16(self, v: u16) -> Result<()> { self.serialize_u64(u64::from(v)) } fn serialize_u32(self, v: u32) -> Result<()> { self.serialize_u64(u64::from(v)) } fn serialize_u64(self, v: u64) -> Result<()> { self.output += &v.to_string(); Ok(()) } fn serialize_f32(self, v: f32) -> Result<()> { self.serialize_f64(f64::from(v)) } fn serialize_f64(self, v: f64) -> Result<()> { self.output += &v.to_string(); Ok(()) } // Serialize a char as a single-character string. Other formats may // represent this differently. fn serialize_char(self, v: char) -> Result<()> { self.serialize_str(&v.to_string()) } // This only works for strings that don't require escape sequences but you // get the idea. For example it would emit invalid JSON if the input string // contains a '"' character. fn serialize_str(self, v: &str) -> Result<()> { self.output += "\""; if v.len() > self.max_string { self.output += &v[..self.max_string]; self.output += "..."; } else { self.output += v; } self.output += "\""; Ok(()) } // Serialize a byte array as an array of bytes. Could also use a base64 // string here. Binary formats will typically represent byte arrays more // compactly. fn serialize_bytes(self, v: &[u8]) -> Result<()> { use serde::ser::SerializeSeq; let mut seq = self.serialize_seq(Some(v.len()))?; for byte in v { seq.serialize_element(byte)?; } seq.end() } // An absent optional is represented as the JSON `null`. fn serialize_none(self) -> Result<()> { self.serialize_unit() } // A present optional is represented as just the contained value. Note that // this is a lossy representation. For example the values `Some(())` and // `None` both serialize as just `null`. Unfortunately this is typically // what people expect when working with JSON. Other formats are encouraged // to behave more intelligently if possible. fn serialize_some<T>(self, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(self) } // In Serde, unit means an anonymous value containing no data. Map this to // JSON as `null`. fn serialize_unit(self) -> Result<()> { self.output += "None"; Ok(()) } // Unit struct means a named value containing no data. Again, since there is // no data, map this to JSON as `null`. There is no need to serialize the // name in most formats. fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { self.serialize_unit() } // When serializing a unit variant (or any other kind of variant), formats // can choose whether to keep track of it by index or by name. Binary // formats typically use the index of the variant and human-readable formats // typically use the name. fn serialize_unit_variant( self, _name: &'static str, _variant_index: u32, variant: &'static str, ) -> Result<()> { // self.serialize_str(variant) self.output += variant; Ok(()) } // As is done here, serializers are encouraged to treat newtype structs as // insignificant wrappers around the data they contain. fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(self) } // Note that newtype variant (and all of the other variant serialization // methods) refer exclusively to the "externally tagged" enum // representation. // // Serialize this to JSON in externally tagged form as `{ NAME: VALUE }`. 
fn serialize_newtype_variant<T>( self, _name: &'static str, _variant_index: u32, variant: &'static str, value: &T, ) -> Result<()> where T: ?Sized + Serialize, { // variant.serialize(&mut *self)?; self.output += variant; self.output += "("; value.serialize(&mut *self)?; self.output += ")"; Ok(()) } // Now we get to the serialization of compound types. // // The start of the sequence, each value, and the end are three separate // method calls. This one is responsible only for serializing the start, // which in JSON is `[`. // // The length of the sequence may or may not be known ahead of time. This // doesn't make a difference in JSON because the length is not represented // explicitly in the serialized form. Some serializers may only be able to // support sequences for which the length is known up front. fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> { self.output += "["; self.level = std::cmp::min(self.max_depth - 1, self.level + 1); self.num_elements[self.level] = 0; Ok(self) } // Tuples look just like sequences in JSON. Some formats may be able to // represent tuples more efficiently by omitting the length, since tuple // means that the corresponding `Deserialize implementation will know the // length without needing to look at the serialized data. fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> { self.output += "("; self.level = std::cmp::min(self.max_depth - 1, self.level + 1); self.num_elements[self.level] = 0; Ok(self) } // Tuple structs look just like sequences in JSON. fn serialize_tuple_struct( self, _name: &'static str, len: usize, ) -> Result<Self::SerializeTupleStruct> { self.serialize_tuple(len) } // Tuple variants are represented in JSON as `{ NAME: [DATA...] }`. Again // this method is only responsible for the externally tagged representation. fn serialize_tuple_variant( self, _name: &'static str, _variant_index: u32, variant: &'static str, _len: usize, ) -> Result<Self::SerializeTupleVariant> { // variant.serialize(&mut *self)?; self.output += variant; self.output += "("; self.level = std::cmp::min(self.max_depth - 1, self.level + 1); self.num_elements[self.level] = 0; Ok(self) } // Maps are represented in JSON as `{ K: V, K: V, ... }`. fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> { self.output += "{"; self.level = std::cmp::min(self.max_depth - 1, self.level + 1); self.num_elements[self.level] = 0; Ok(self) } // Structs look just like maps in JSON. In particular, JSON requires that we // serialize the field names of the struct. Other formats may be able to // omit the field names when serializing structs because the corresponding // Deserialize implementation is required to know what the keys are without // looking at the serialized data. fn serialize_struct(self, name: &'static str, _len: usize) -> Result<Self::SerializeStruct> { // self.serialize_map(Some(len)) // name.serialize(&mut *self)?; if let Some(stripped) = name.strip_suffix("Helper") { self.output += stripped; } else { self.output += name } self.output += "("; self.level = std::cmp::min(self.max_depth - 1, self.level + 1); self.num_elements[self.level] = 0; Ok(self) } // Struct variants are represented in JSON as `{ NAME: { K: V, ... } }`. // This is the externally tagged representation. 
fn serialize_struct_variant( self, _name: &'static str, _variant_index: u32, variant: &'static str, _len: usize, ) -> Result<Self::SerializeStructVariant> { // variant.serialize(&mut *self)?; self.output += variant; self.output += "("; self.level = std::cmp::min(self.max_depth - 1, self.level + 1); self.num_elements[self.level] = 0; Ok(self) } } // The following 7 impls deal with the serialization of compound types like // sequences and maps. Serialization of such types is begun by a Serializer // method and followed by zero or more calls to serialize individual elements of // the compound type and one call to end the compound type. // // This impl is SerializeSeq so these methods are called after `serialize_seq` // is called on the Serializer. impl ser::SerializeSeq for &mut Serializer { // Must match the `Ok` type of the serializer. type Ok = (); // Must match the `Error` type of the serializer. type Error = Error; // Serialize a single element of the sequence. fn serialize_element<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { self.num_elements[self.level] += 1; let num_elements = self.num_elements[self.level]; if num_elements < self.max_elements { if !self.output.ends_with('[') { self.output += ", "; } value.serialize(&mut **self) } else { if num_elements == self.max_elements { self.output += ", ..."; } Ok(()) } } // Close the sequence. fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += "]"; Ok(()) } } // Same thing but for tuples. impl ser::SerializeTuple for &mut Serializer { type Ok = (); type Error = Error; fn serialize_element<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { self.num_elements[self.level] += 1; let num_elements = self.num_elements[self.level]; if num_elements < self.max_elements { if !self.output.ends_with('(') { self.output += ", "; } value.serialize(&mut **self) } else { if num_elements == self.max_elements { self.output += ", ..."; } Ok(()) } } fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += ")"; Ok(()) } } // Same thing but for tuple structs. impl ser::SerializeTupleStruct for &mut Serializer { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { self.num_elements[self.level] += 1; let num_elements = self.num_elements[self.level]; if num_elements < self.max_elements { if !self.output.ends_with('(') { self.output += ", "; } value.serialize(&mut **self) } else { if num_elements == self.max_elements { self.output += ", ..."; } Ok(()) } } fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += ")"; Ok(()) } } // Tuple variants are a little different. Refer back to the // `serialize_tuple_variant` method above: // // self.output += "{"; // variant.serialize(&mut *self)?; // self.output += ":["; // // So the `end` method in this impl is responsible for closing both the `]` and // the `}`. 
impl ser::SerializeTupleVariant for &mut Serializer { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { self.num_elements[self.level] += 1; let num_elements = self.num_elements[self.level]; if num_elements < self.max_elements { if !self.output.ends_with('(') { self.output += ", "; } value.serialize(&mut **self) } else { if num_elements == self.max_elements { self.output += ", ..."; } Ok(()) } } fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += ")"; Ok(()) } } // Some `Serialize` types are not able to hold a key and value in memory at the // same time so `SerializeMap` implementations are required to support // `serialize_key` and `serialize_value` individually. // // There is a third optional method on the `SerializeMap` trait. The // `serialize_entry` method allows serializers to optimize for the case where // key and value are both available simultaneously. In JSON it doesn't make a // difference so the default behavior for `serialize_entry` is fine. impl ser::SerializeMap for &mut Serializer { type Ok = (); type Error = Error; // The Serde data model allows map keys to be any serializable type. JSON // only allows string keys so the implementation below will produce invalid // JSON if the key serializes as something other than a string. // // A real JSON serializer would need to validate that map keys are strings. // This can be done by using a different Serializer to serialize the key // (instead of `&mut **self`) and having that other serializer only // implement `serialize_str` and return an error on any other data type. fn serialize_key<T>(&mut self, key: &T) -> Result<()> where T: ?Sized + Serialize, { self.num_elements[self.level] += 1; let num_elements = self.num_elements[self.level]; if num_elements < self.max_elements { if !self.output.ends_with('{') { self.output += ", "; } key.serialize(&mut **self) } else { if num_elements == self.max_elements { self.output += ", ..."; } Ok(()) } } // It doesn't make a difference whether the colon is printed at the end of // `serialize_key` or at the beginning of `serialize_value`. In this case // the code is a bit simpler having it here. fn serialize_value<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { let num_elements = self.num_elements[self.level]; if num_elements < self.max_elements { self.output += ":"; value.serialize(&mut **self) } else { Ok(()) } } fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += "}"; Ok(()) } } // Structs are like maps in which the keys are constrained to be compile-time // constant strings. impl ser::SerializeStruct for &mut Serializer { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()> where T: ?Sized + Serialize, { if !self.output.ends_with('(') { self.output += ", "; } // key.serialize(&mut **self)?; if key != "type" { self.output += key; self.output += "="; value.serialize(&mut **self) } else { Ok(()) } } fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += ")"; Ok(()) } } // Similar to `SerializeTupleVariant`, here the `end` method is responsible for // closing both of the curly braces opened by `serialize_struct_variant`. 
impl ser::SerializeStructVariant for &mut Serializer { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()> where T: ?Sized + Serialize, { if !self.output.ends_with('(') { self.output += ", "; } // key.serialize(&mut **self)?; self.output += key; self.output += "="; value.serialize(&mut **self) } fn end(self) -> Result<()> { self.num_elements[self.level] = 0; self.level = self.level.saturating_sub(1); self.output += ")"; Ok(()) } } //////////////////////////////////////////////////////////////////////////////// #[test] fn test_basic() { assert_eq!(to_string(&true).unwrap(), "True"); assert_eq!(to_string(&Some(1)).unwrap(), "1"); assert_eq!(to_string(&None::<usize>).unwrap(), "None"); } #[test] fn test_struct() { #[derive(Serialize)] struct Test { int: u32, seq: Vec<&'static str>, } let test = Test { int: 1, seq: vec!["a", "b"], }; let expected = r#"Test(int=1, seq=["a", "b"])"#; assert_eq!(to_string(&test).unwrap(), expected); } #[test] fn test_enum() { #[derive(Serialize)] enum E { Unit, Newtype(u32), Tuple(u32, u32), Struct { a: u32 }, } let u = E::Unit; let expected = r#"Unit"#; assert_eq!(to_string(&u).unwrap(), expected); let n = E::Newtype(1); let expected = r#"Newtype(1)"#; assert_eq!(to_string(&n).unwrap(), expected); let t = E::Tuple(1, 2); let expected = r#"Tuple(1, 2)"#; assert_eq!(to_string(&t).unwrap(), expected); let s = E::Struct { a: 1 }; let expected = r#"Struct(a=1)"#; assert_eq!(to_string(&s).unwrap(), expected); } #[test] fn test_enum_untagged() { #[derive(Serialize)] #[serde(untagged)] enum E { Unit, Newtype(u32), Tuple(u32, u32), Struct { a: u32 }, } let u = E::Unit; let expected = r#"None"#; assert_eq!(to_string(&u).unwrap(), expected); let n = E::Newtype(1); let expected = r#"1"#; assert_eq!(to_string(&n).unwrap(), expected); let t = E::Tuple(1, 2); let expected = r#"(1, 2)"#; assert_eq!(to_string(&t).unwrap(), expected); let s = E::Struct { a: 1 }; let expected = r#"E(a=1)"#; assert_eq!(to_string(&s).unwrap(), expected); } #[test] fn test_struct_tagged() { #[derive(Serialize)] #[serde(untagged)] enum E { A(A), } #[derive(Serialize)] #[serde(tag = "type")] struct A { a: bool, b: usize, } let u = A { a: true, b: 1 }; // let expected = r#"A(type="A", a=True, b=1)"#; // No we skip all `type` manually inserted variants. let expected = r#"A(a=True, b=1)"#; assert_eq!(to_string(&u).unwrap(), expected); let u = E::A(A { a: true, b: 1 }); let expected = r#"A(a=True, b=1)"#; assert_eq!(to_string(&u).unwrap(), expected); } #[test] fn test_flatten() { #[derive(Serialize)] struct A { a: bool, b: usize, } #[derive(Serialize)] struct B { c: A, d: usize, } #[derive(Serialize)] struct C { #[serde(flatten)] c: A, d: usize, } #[derive(Serialize)] #[serde(transparent)] struct D { e: A, } let u = B { c: A { a: true, b: 1 }, d: 2, }; let expected = r#"B(c=A(a=True, b=1), d=2)"#; assert_eq!(to_string(&u).unwrap(), expected); let u = C { c: A { a: true, b: 1 }, d: 2, }; // XXX This is unfortunate but true, flatten forces the serialization // to use the serialize_map without any means for the Serializer to know about this // flattening attempt let expected = r#"{"a":True, "b":1, "d":2}"#; assert_eq!(to_string(&u).unwrap(), expected); let u = D { e: A { a: true, b: 1 }, }; let expected = r#"A(a=True, b=1)"#; assert_eq!(to_string(&u).unwrap(), expected); }
tokenizers/bindings/python/src/utils/serde_pyo3.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/serde_pyo3.rs", "repo_id": "tokenizers", "token_count": 10084 }
292
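The serializer above is what backs __repr__ and __str__ of the Python-bound objects (decoders.rs calls serde_pyo3::repr and serde_pyo3::to_string for those methods). A small sketch of how that surfaces from Python follows; the printed strings are indicative only and may vary between releases.

from tokenizers import decoders

ctc = decoders.CTC(pad_token="<pad>", word_delimiter_token="|", cleanup=True)

# __str__ goes through to_string(): sequences are capped at roughly 6 elements
# and strings at roughly 100 characters, the rest being elided with "...".
print(str(ctc))   # e.g. CTC(pad_token="<pad>", word_delimiter_token="|", cleanup=True)

# __repr__ goes through repr(): same keyword-argument style, but without the
# element and string-length truncation, so long fields such as a precompiled
# charmap are printed in full.
print(repr(ctc))

Both forms render Rust structs in a Python-like constructor syntax (field=value) and skip serde "type" tag fields, matching the test cases at the bottom of the file.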