text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
import gc import unittest import torch from diffusers import ( StableDiffusionImg2ImgPipeline, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) from .single_file_testing_utils import SDSingleFileTesterMixin enable_full_determinism() @slow @require_torch_accelerator class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionImg2ImgPipeline ckpt_path = ( "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" ) original_config = ( "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_single_file_format_inference_is_same_as_pretrained(self): super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) @slow @require_torch_accelerator class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionImg2ImgPipeline ckpt_path = 
"https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors" original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" repo_id = "stabilityai/stable-diffusion-2-1" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_single_file_format_inference_is_same_as_pretrained(self): super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
diffusers/tests/single_file/test_stable_diffusion_img2img_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_img2img_single_file.py", "repo_id": "diffusers", "token_count": 1604 }
203
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import collections import importlib.util import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py TRANSFORMERS_PATH = "src/diffusers" PATH_TO_DOCS = "docs/source/en" REPO_PATH = "." def _find_text_in_file(filename, start_prompt, end_prompt): """ Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty lines. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start prompt. start_index = 0 while not lines[start_index].startswith(start_prompt): start_index += 1 start_index += 1 end_index = start_index while not lines[end_index].startswith(end_prompt): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. 
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the diffusers module imported is the one in the repo. spec = importlib.util.spec_from_file_location( "diffusers", os.path.join(TRANSFORMERS_PATH, "__init__.py"), submodule_search_locations=[TRANSFORMERS_PATH], ) diffusers_module = spec.loader.load_module() # Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python def camel_case_split(identifier): """Split a camelcased `identifier` into words.""" matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) return [m.group(0) for m in matches] def _center_text(text, width): text_length = 2 if text == "โœ…" or text == "โŒ" else len(text) left_indent = (width - text_length) // 2 right_indent = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def get_model_table_from_auto_modules(): """Generates an up-to-date model table from the content of the auto modules.""" # Dictionary model names to config. config_mapping_names = diffusers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES model_name_to_config = { name: config_mapping_names[code] for code, name in diffusers_module.MODEL_NAMES_MAPPING.items() if code in config_mapping_names } model_name_to_prefix = {name: config.replace("ConfigMixin", "") for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. 
slow_tokenizers = collections.defaultdict(bool) fast_tokenizers = collections.defaultdict(bool) pt_models = collections.defaultdict(bool) tf_models = collections.defaultdict(bool) flax_models = collections.defaultdict(bool) # Let's lookup through all diffusers object (once). for attr_name in dir(diffusers_module): lookup_dict = None if attr_name.endswith("Tokenizer"): lookup_dict = slow_tokenizers attr_name = attr_name[:-9] elif attr_name.endswith("TokenizerFast"): lookup_dict = fast_tokenizers attr_name = attr_name[:-13] elif _re_tf_models.match(attr_name) is not None: lookup_dict = tf_models attr_name = _re_tf_models.match(attr_name).groups()[0] elif _re_flax_models.match(attr_name) is not None: lookup_dict = flax_models attr_name = _re_flax_models.match(attr_name).groups()[0] elif _re_pt_models.match(attr_name) is not None: lookup_dict = pt_models attr_name = _re_pt_models.match(attr_name).groups()[0] if lookup_dict is not None: while len(attr_name) > 0: if attr_name in model_name_to_prefix.values(): lookup_dict[attr_name] = True break # Try again after removing the last word in the name attr_name = "".join(camel_case_split(attr_name)[:-1]) # Let's build that table! model_names = list(model_name_to_config.keys()) model_names.sort(key=str.lower) columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). 
widths = [len(c) + 2 for c in columns] widths[0] = max([len(name) for name in model_names]) + 2 # Build the table per se table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n" check = {True: "โœ…", False: "โŒ"} for name in model_names: prefix = model_name_to_prefix[name] line = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n" return table def check_model_table(overwrite=False): """Check the model table in the index.rst is consistent with the state of the lib and maybe `overwrite`.""" current_table, start_index, end_index, lines = _find_text_in_file( filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", ) new_table = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:]) else: raise ValueError( "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_model_table(args.fix_and_overwrite)
diffusers/utils/check_table.py/0
{ "file_path": "diffusers/utils/check_table.py", "repo_id": "diffusers", "token_count": 3011 }
204
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`. Usage for an update (as used by the GitHub action `update_metadata`): ```bash python utils/update_metadata.py ``` Script modified from: https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py """ import argparse import os import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from diffusers.pipelines.auto_pipeline import ( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, AUTO_TEXT2IMAGE_PIPELINES_MAPPING, ) PIPELINE_TAG_JSON = "pipeline_tags.json" def get_supported_pipeline_table() -> dict: """ Generates a dictionary containing the supported auto classes for each pipeline type, using the content of the auto modules. """ # All supported pipelines for automatic mapping. 
all_supported_pipeline_classes = [ (class_name.__name__, "text-to-image", "AutoPipelineForText2Image") for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items() ] all_supported_pipeline_classes += [ (class_name.__name__, "image-to-image", "AutoPipelineForImage2Image") for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items() ] all_supported_pipeline_classes += [ (class_name.__name__, "image-to-image", "AutoPipelineForInpainting") for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items() ] all_supported_pipeline_classes = list(set(all_supported_pipeline_classes)) all_supported_pipeline_classes.sort(key=lambda x: x[0]) data = {} data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes] data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes] data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes] return data def update_metadata(commit_sha: str): """ Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`. Args: commit_sha (`str`): The commit SHA on Diffusers corresponding to this update. 
""" pipelines_table = get_supported_pipeline_table() pipelines_table = pd.DataFrame(pipelines_table) pipelines_dataset = Dataset.from_pandas(pipelines_table) hub_pipeline_tags_json = hf_hub_download( repo_id="huggingface/diffusers-metadata", filename=PIPELINE_TAG_JSON, repo_type="dataset", ) with open(hub_pipeline_tags_json) as f: hub_pipeline_tags_json = f.read() with tempfile.TemporaryDirectory() as tmp_dir: pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f: pipeline_tags_json = f.read() hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json if hub_pipeline_tags_equal: print("No updates, not pushing the metadata files.") return if commit_sha is not None: commit_message = ( f"Update with commit {commit_sha}\n\nSee: https://github.com/huggingface/diffusers/commit/{commit_sha}" ) else: commit_message = "Update" upload_folder( repo_id="huggingface/diffusers-metadata", folder_path=tmp_dir, repo_type="dataset", commit_message=commit_message, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.") args = parser.parse_args() update_metadata(args.commit_sha)
diffusers/utils/update_metadata.py/0
{ "file_path": "diffusers/utils/update_metadata.py", "repo_id": "diffusers", "token_count": 1690 }
205
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Capture video feed from a camera as raw images.""" import argparse import datetime as dt import os import time from pathlib import Path import cv2 import rerun as rr # see https://rerun.io/docs/howto/visualization/limit-ram RERUN_MEMORY_LIMIT = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "5%") def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int, duration: int): rr.init("lerobot_capture_camera_feed") rr.spawn(memory_limit=RERUN_MEMORY_LIMIT) now = dt.datetime.now() capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}" if not capture_dir.exists(): capture_dir.mkdir(parents=True, exist_ok=True) # Opens the default webcam cap = cv2.VideoCapture(0) if not cap.isOpened(): print("Error: Could not open video stream.") return cap.set(cv2.CAP_PROP_FPS, fps) cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height) frame_index = 0 start_time = time.time() while time.time() - start_time < duration: ret, frame = cap.read() if not ret: print("Error: Could not read frame.") break rr.log("video/stream", rr.Image(frame), static=True) cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame) frame_index += 1 # Release the capture cap.release() # TODO(Steven): Add a graceful shutdown via a close() method for the Viewer context, though not currently supported in the Rerun API. 
if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output-dir", type=Path, default=Path("outputs/cam_capture/"), help="Directory where the capture images are written. A subfolder named with the current date & time will be created inside it for each capture.", ) parser.add_argument( "--fps", type=int, default=30, help="Frames Per Second of the capture.", ) parser.add_argument( "--width", type=int, default=1280, help="Width of the captured images.", ) parser.add_argument( "--height", type=int, default=720, help="Height of the captured images.", ) parser.add_argument( "--duration", type=int, default=20, help="Duration in seconds for which the video stream should be captured.", ) args = parser.parse_args() display_and_save_video_stream(**vars(args))
lerobot/benchmarks/video/capture_camera_feed.py/0
{ "file_path": "lerobot/benchmarks/video/capture_camera_feed.py", "repo_id": "lerobot", "token_count": 1246 }
206
<div class="flex justify-center"> <a target="_blank" href="https://huggingface.co/lerobot"> <img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot-logo-thumbnail.png" style="width: 100%" ></img> </a> </div> # LeRobot **State-of-the-art machine learning for real-world robotics** ๐Ÿค— LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. ๐Ÿค— LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. ๐Ÿค— LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started. ๐Ÿค— LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page. Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb)
lerobot/docs/source/index.mdx/0
{ "file_path": "lerobot/docs/source/index.mdx", "repo_id": "lerobot", "token_count": 337 }
207
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This script demonstrates how to train Diffusion Policy on the PushT environment. Once you have trained a model with this script, you can try to evaluate it on examples/2_evaluate_pretrained_policy.py """ from pathlib import Path import torch from lerobot.configs.types import FeatureType from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata from lerobot.datasets.utils import dataset_to_policy_features from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy def main(): # Create a directory to store the training checkpoint. output_directory = Path("outputs/train/example_pusht_diffusion") output_directory.mkdir(parents=True, exist_ok=True) # # Select your device device = torch.device("cuda") # Number of offline training steps (we'll only do offline training for this example.) # Adjust as you prefer. 5000 steps are needed to get something worth evaluating. training_steps = 5000 log_freq = 1 # When starting from scratch (i.e. 
not from a pretrained policy), we need to specify 2 things before # creating the policy: # - input/output shapes: to properly size the policy # - dataset stats: for normalization and denormalization of input/outputs dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") features = dataset_to_policy_features(dataset_metadata.features) output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} input_features = {key: ft for key, ft in features.items() if key not in output_features} # Policies are initialized with a configuration class, in this case `DiffusionConfig`. For this example, # we'll just use the defaults and so no arguments other than input/output features need to be passed. cfg = DiffusionConfig(input_features=input_features, output_features=output_features) # We can now instantiate our policy with this config and the dataset stats. policy = DiffusionPolicy(cfg, dataset_stats=dataset_metadata.stats) policy.train() policy.to(device) # Another policy-dataset interaction is with the delta_timestamps. Each policy expects a given number frames # which can differ for inputs, outputs and rewards (if there are some). delta_timestamps = { "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices], "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices], "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices], } # In this case with the standard configuration for Diffusion Policy, it is equivalent to this: delta_timestamps = { # Load the previous image and state at -0.1 seconds before current frame, # then load current image and state corresponding to 0.0 second. "observation.image": [-0.1, 0.0], "observation.state": [-0.1, 0.0], # Load the previous action (-0.1), the next action to be executed (0.0), # and 14 future actions with a 0.1 seconds spacing. All these actions will be # used to supervise the policy. 
"action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], } # We can then instantiate the dataset with these delta_timestamps configuration. dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps) # Then we create our optimizer and dataloader for offline training. optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4) dataloader = torch.utils.data.DataLoader( dataset, num_workers=4, batch_size=64, shuffle=True, pin_memory=device.type != "cpu", drop_last=True, ) # Run training loop. step = 0 done = False while not done: for batch in dataloader: batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()} loss, _ = policy.forward(batch) loss.backward() optimizer.step() optimizer.zero_grad() if step % log_freq == 0: print(f"step: {step} loss: {loss.item():.3f}") step += 1 if step >= training_steps: done = True break # Save a policy checkpoint. policy.save_pretrained(output_directory) if __name__ == "__main__": main()
lerobot/examples/3_train_policy.py/0
{ "file_path": "lerobot/examples/3_train_policy.py", "repo_id": "lerobot", "token_count": 1810 }
208
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Provides the RealSenseCamera class for capturing frames from Intel RealSense cameras. """ import logging import time from threading import Event, Lock, Thread from typing import Any import cv2 import numpy as np try: import pyrealsense2 as rs except Exception as e: logging.info(f"Could not import realsense: {e}") from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from ..camera import Camera from ..configs import ColorMode from ..utils import get_cv2_rotation from .configuration_realsense import RealSenseCameraConfig logger = logging.getLogger(__name__) class RealSenseCamera(Camera): """ Manages interactions with Intel RealSense cameras for frame and depth recording. This class provides an interface similar to `OpenCVCamera` but tailored for RealSense devices, leveraging the `pyrealsense2` library. It uses the camera's unique serial number for identification, offering more stability than device indices, especially on Linux. It also supports capturing depth maps alongside color frames. Use the provided utility script to find available camera indices and default profiles: ```bash lerobot-find-cameras realsense ``` A `RealSenseCamera` instance requires a configuration object specifying the camera's serial number or a unique device name. If using the name, ensure only one camera with that name is connected. 
The camera's default settings (FPS, resolution, color mode) from the stream profile are used unless overridden in the configuration. Example: ```python from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig from lerobot.cameras import ColorMode, Cv2Rotation # Basic usage with serial number config = RealSenseCameraConfig(serial_number_or_name="0123456789") # Replace with actual SN camera = RealSenseCamera(config) camera.connect() # Read 1 frame synchronously color_image = camera.read() print(color_image.shape) # Read 1 frame asynchronously async_image = camera.async_read() # When done, properly disconnect the camera using camera.disconnect() # Example with depth capture and custom settings custom_config = RealSenseCameraConfig( serial_number_or_name="0123456789", # Replace with actual SN fps=30, width=1280, height=720, color_mode=ColorMode.BGR, # Request BGR output rotation=Cv2Rotation.NO_ROTATION, use_depth=True ) depth_camera = RealSenseCamera(custom_config) depth_camera.connect() # Read 1 depth frame depth_map = depth_camera.read_depth() # Example using a unique camera name name_config = RealSenseCameraConfig(serial_number_or_name="Intel RealSense D435") # If unique name_camera = RealSenseCamera(name_config) # ... connect, read, disconnect ... ``` """ def __init__(self, config: RealSenseCameraConfig): """ Initializes the RealSenseCamera instance. Args: config: The configuration settings for the camera. 
""" super().__init__(config) self.config = config if config.serial_number_or_name.isdigit(): self.serial_number = config.serial_number_or_name else: self.serial_number = self._find_serial_number_from_name(config.serial_number_or_name) self.fps = config.fps self.color_mode = config.color_mode self.use_depth = config.use_depth self.warmup_s = config.warmup_s self.rs_pipeline: rs.pipeline | None = None self.rs_profile: rs.pipeline_profile | None = None self.thread: Thread | None = None self.stop_event: Event | None = None self.frame_lock: Lock = Lock() self.latest_frame: np.ndarray | None = None self.new_frame_event: Event = Event() self.rotation: int | None = get_cv2_rotation(config.rotation) if self.height and self.width: self.capture_width, self.capture_height = self.width, self.height if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: self.capture_width, self.capture_height = self.height, self.width def __str__(self) -> str: return f"{self.__class__.__name__}({self.serial_number})" @property def is_connected(self) -> bool: """Checks if the camera pipeline is started and streams are active.""" return self.rs_pipeline is not None and self.rs_profile is not None def connect(self, warmup: bool = True): """ Connects to the RealSense camera specified in the configuration. Initializes the RealSense pipeline, configures the required streams (color and optionally depth), starts the pipeline, and validates the actual stream settings. Raises: DeviceAlreadyConnectedError: If the camera is already connected. ValueError: If the configuration is invalid (e.g., missing serial/name, name not unique). ConnectionError: If the camera is found but fails to start the pipeline or no RealSense devices are detected at all. RuntimeError: If the pipeline starts but fails to apply requested settings. 
""" if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} is already connected.") self.rs_pipeline = rs.pipeline() rs_config = rs.config() self._configure_rs_pipeline_config(rs_config) try: self.rs_profile = self.rs_pipeline.start(rs_config) except RuntimeError as e: self.rs_profile = None self.rs_pipeline = None raise ConnectionError( f"Failed to open {self}.Run `lerobot-find-cameras realsense` to find available cameras." ) from e self._configure_capture_settings() if warmup: time.sleep( 1 ) # NOTE(Steven): RS cameras need a bit of time to warm up before the first read. If we don't wait, the first read from the warmup will raise. start_time = time.time() while time.time() - start_time < self.warmup_s: self.read() time.sleep(0.1) logger.info(f"{self} connected.") @staticmethod def find_cameras() -> list[dict[str, Any]]: """ Detects available Intel RealSense cameras connected to the system. Returns: List[Dict[str, Any]]: A list of dictionaries, where each dictionary contains 'type', 'id' (serial number), 'name', firmware version, USB type, and other available specs, and the default profile properties (width, height, fps, format). Raises: OSError: If pyrealsense2 is not installed. ImportError: If pyrealsense2 is not installed. 
""" found_cameras_info = [] context = rs.context() devices = context.query_devices() for device in devices: camera_info = { "name": device.get_info(rs.camera_info.name), "type": "RealSense", "id": device.get_info(rs.camera_info.serial_number), "firmware_version": device.get_info(rs.camera_info.firmware_version), "usb_type_descriptor": device.get_info(rs.camera_info.usb_type_descriptor), "physical_port": device.get_info(rs.camera_info.physical_port), "product_id": device.get_info(rs.camera_info.product_id), "product_line": device.get_info(rs.camera_info.product_line), } # Get stream profiles for each sensor sensors = device.query_sensors() for sensor in sensors: profiles = sensor.get_stream_profiles() for profile in profiles: if profile.is_video_stream_profile() and profile.is_default(): vprofile = profile.as_video_stream_profile() stream_info = { "stream_type": vprofile.stream_name(), "format": vprofile.format().name, "width": vprofile.width(), "height": vprofile.height(), "fps": vprofile.fps(), } camera_info["default_stream_profile"] = stream_info found_cameras_info.append(camera_info) return found_cameras_info def _find_serial_number_from_name(self, name: str) -> str: """Finds the serial number for a given unique camera name.""" camera_infos = self.find_cameras() found_devices = [cam for cam in camera_infos if str(cam["name"]) == name] if not found_devices: available_names = [cam["name"] for cam in camera_infos] raise ValueError( f"No RealSense camera found with name '{name}'. Available camera names: {available_names}" ) if len(found_devices) > 1: serial_numbers = [dev["serial_number"] for dev in found_devices] raise ValueError( f"Multiple RealSense cameras found with name '{name}'. " f"Please use a unique serial number instead. 
Found SNs: {serial_numbers}" ) serial_number = str(found_devices[0]["serial_number"]) return serial_number def _configure_rs_pipeline_config(self, rs_config): """Creates and configures the RealSense pipeline configuration object.""" rs.config.enable_device(rs_config, self.serial_number) if self.width and self.height and self.fps: rs_config.enable_stream( rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps ) if self.use_depth: rs_config.enable_stream( rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps ) else: rs_config.enable_stream(rs.stream.color) if self.use_depth: rs_config.enable_stream(rs.stream.depth) def _configure_capture_settings(self) -> None: """Sets fps, width, and height from device stream if not already configured. Uses the color stream profile to update unset attributes. Handles rotation by swapping width/height when needed. Original capture dimensions are always stored. Raises: DeviceNotConnectedError: If device is not connected. """ if not self.is_connected: raise DeviceNotConnectedError(f"Cannot validate settings for {self} as it is not connected.") stream = self.rs_profile.get_stream(rs.stream.color).as_video_stream_profile() if self.fps is None: self.fps = stream.fps() if self.width is None or self.height is None: actual_width = int(round(stream.width())) actual_height = int(round(stream.height())) if self.rotation in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]: self.width, self.height = actual_height, actual_width self.capture_width, self.capture_height = actual_width, actual_height else: self.width, self.height = actual_width, actual_height self.capture_width, self.capture_height = actual_width, actual_height def read_depth(self, timeout_ms: int = 200) -> np.ndarray: """ Reads a single frame (depth) synchronously from the camera. This is a blocking call. It waits for a coherent set of frames (depth) from the camera hardware via the RealSense pipeline. 
    def read(self, color_mode: ColorMode | None = None, timeout_ms: int = 200) -> np.ndarray:
        """
        Reads a single frame (color) synchronously from the camera.

        This is a blocking call. It waits for a coherent set of frames (color) from the
        camera hardware via the RealSense pipeline.

        Args:
            color_mode (ColorMode | None): Color mode (RGB or BGR) for the returned frame.
                If None, the instance's default `self.color_mode` is used (see
                `_postprocess_image`).
            timeout_ms (int): Maximum time in milliseconds to wait for a frame.
                Defaults to 200ms.

        Returns:
            np.ndarray: The captured color frame as a NumPy array (height, width, channels),
            processed according to `color_mode` and rotation.

        Raises:
            DeviceNotConnectedError: If the camera is not connected.
            RuntimeError: If reading frames from the pipeline fails or frames are invalid.
            ValueError: If an invalid `color_mode` is requested.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        start_time = time.perf_counter()

        ret, frame = self.rs_pipeline.try_wait_for_frames(timeout_ms=timeout_ms)
        if not ret or frame is None:
            raise RuntimeError(f"{self} read failed (status={ret}).")

        color_frame = frame.get_color_frame()
        color_image_raw = np.asanyarray(color_frame.get_data())

        # Color conversion, dimension validation, and rotation all happen here.
        color_image_processed = self._postprocess_image(color_image_raw, color_mode)

        read_duration_ms = (time.perf_counter() - start_time) * 1e3
        logger.debug(f"{self} read took: {read_duration_ms:.1f}ms")

        return color_image_processed
    def _read_loop(self):
        """
        Internal loop run by the background thread for asynchronous reading.

        On each iteration:
        1. Reads a color frame with 500ms timeout
        2. Stores result in latest_frame (thread-safe)
        3. Sets new_frame_event to notify listeners

        Stops on DeviceNotConnectedError, logs other errors and continues.
        """
        while not self.stop_event.is_set():
            try:
                color_image = self.read(timeout_ms=500)

                # Only the assignment is guarded by the lock; the event is set outside it so
                # waiters in async_read can wake up and grab the lock immediately.
                with self.frame_lock:
                    self.latest_frame = color_image
                self.new_frame_event.set()

            except DeviceNotConnectedError:
                break
            except Exception as e:
                # Transient read failures are logged and the loop keeps running.
                logger.warning(f"Error reading frame in background thread for {self}: {e}")

    def _start_read_thread(self) -> None:
        """Starts or restarts the background read thread if it's not running."""
        # NOTE(review): the brief join below happens *before* stop_event is set, so a still-alive
        # previous thread is never actually asked to stop before being replaced — confirm this
        # ordering is intentional.
        if self.thread is not None and self.thread.is_alive():
            self.thread.join(timeout=0.1)
        if self.stop_event is not None:
            self.stop_event.set()

        # Fresh event/thread pair so the new loop starts from a clean "keep running" state.
        self.stop_event = Event()
        self.thread = Thread(target=self._read_loop, args=(), name=f"{self}_read_loop")
        self.thread.daemon = True
        self.thread.start()

    def _stop_read_thread(self):
        """Signals the background read thread to stop and waits for it to join."""
        if self.stop_event is not None:
            self.stop_event.set()

        # Bounded wait so callers (e.g. disconnect) cannot hang forever on a stuck thread.
        if self.thread is not None and self.thread.is_alive():
            self.thread.join(timeout=2.0)

        self.thread = None
        self.stop_event = None
It does not block waiting for the camera hardware directly, but may wait up to timeout_ms for the background thread to provide a frame. Args: timeout_ms (float): Maximum time in milliseconds to wait for a frame to become available. Defaults to 200ms (0.2 seconds). Returns: np.ndarray: The latest captured frame data (color image), processed according to configuration. Raises: DeviceNotConnectedError: If the camera is not connected. TimeoutError: If no frame data becomes available within the specified timeout. RuntimeError: If the background thread died unexpectedly or another error occurs. """ if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") if self.thread is None or not self.thread.is_alive(): self._start_read_thread() if not self.new_frame_event.wait(timeout=timeout_ms / 1000.0): thread_alive = self.thread is not None and self.thread.is_alive() raise TimeoutError( f"Timed out waiting for frame from camera {self} after {timeout_ms} ms. " f"Read thread alive: {thread_alive}." ) with self.frame_lock: frame = self.latest_frame self.new_frame_event.clear() if frame is None: raise RuntimeError(f"Internal error: Event set but no frame available for {self}.") return frame def disconnect(self): """ Disconnects from the camera, stops the pipeline, and cleans up resources. Stops the background read thread (if running) and stops the RealSense pipeline. Raises: DeviceNotConnectedError: If the camera is already disconnected (pipeline not running). """ if not self.is_connected and self.thread is None: raise DeviceNotConnectedError( f"Attempted to disconnect {self}, but it appears already disconnected." ) if self.thread is not None: self._stop_read_thread() if self.rs_pipeline is not None: self.rs_pipeline.stop() self.rs_pipeline = None self.rs_profile = None logger.info(f"{self} disconnected.")
lerobot/src/lerobot/cameras/realsense/camera_realsense.py/0
{ "file_path": "lerobot/src/lerobot/cameras/realsense/camera_realsense.py", "repo_id": "lerobot", "token_count": 9038 }
209
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An online buffer for the online training loop in train.py Note to maintainers: This duplicates some logic from LeRobotDataset and EpisodeAwareSampler. We should consider converging to one approach. Here we have opted to use numpy.memmap to back the data buffer. It's much faster than using HuggingFace Datasets as there's no conversion to an intermediate non-python object. Also it supports in-place slicing and mutation which is very handy for a dynamic buffer. """ import os from pathlib import Path from typing import Any import numpy as np import torch from lerobot.datasets.lerobot_dataset import LeRobotDataset def _make_memmap_safe(**kwargs) -> np.memmap: """Make a numpy memmap with checks on available disk space first. Expected kwargs are: "filename", "dtype" (must by np.dtype), "mode" and "shape" For information on dtypes: https://numpy.org/doc/stable/reference/arrays.dtypes.html#arrays-dtypes-constructing """ if kwargs["mode"].startswith("w"): required_space = kwargs["dtype"].itemsize * np.prod(kwargs["shape"]) # bytes stats = os.statvfs(Path(kwargs["filename"]).parent) available_space = stats.f_bavail * stats.f_frsize # bytes if required_space >= available_space * 0.8: raise RuntimeError( f"You're about to take up {required_space} of {available_space} bytes available." 
) return np.memmap(**kwargs) class OnlineBuffer(torch.utils.data.Dataset): """FIFO data buffer for the online training loop in train.py. Follows the protocol of LeRobotDataset as much as is required to have it be used by the online training loop in the same way that a LeRobotDataset would be used. The underlying data structure will have data inserted in a circular fashion. Always insert after the last index, and when you reach the end, wrap around to the start. The data is stored in a numpy memmap. """ NEXT_INDEX_KEY = "_next_index" OCCUPANCY_MASK_KEY = "_occupancy_mask" INDEX_KEY = "index" FRAME_INDEX_KEY = "frame_index" EPISODE_INDEX_KEY = "episode_index" TIMESTAMP_KEY = "timestamp" IS_PAD_POSTFIX = "_is_pad" def __init__( self, write_dir: str | Path, data_spec: dict[str, Any] | None, buffer_capacity: int | None, fps: float | None = None, delta_timestamps: dict[str, list[float]] | dict[str, np.ndarray] | None = None, ): """ The online buffer can be provided from scratch or you can load an existing online buffer by passing a `write_dir` associated with an existing buffer. Args: write_dir: Where to keep the numpy memmap files. One memmap file will be stored for each data key. Note that if the files already exist, they are opened in read-write mode (used for training resumption.) data_spec: A mapping from data key to data specification, like {data_key: {"shape": tuple[int], "dtype": np.dtype}}. This should include all the data that you wish to record into the buffer, but note that "index", "frame_index" and "episode_index" are already accounted for by this class, so you don't need to include them. buffer_capacity: How many frames should be stored in the buffer as a maximum. Be aware of your system's available disk space when choosing this. fps: Same as the fps concept in LeRobot dataset. Here it needs to be provided for the delta_timestamps logic. You can pass None if you are not using delta_timestamps. 
delta_timestamps: Same as the delta_timestamps concept in LeRobotDataset. This is internally converted to dict[str, np.ndarray] for optimization purposes. """ self.set_delta_timestamps(delta_timestamps) self._fps = fps # Tolerance in seconds used to discard loaded frames when their timestamps are not close enough from # the requested frames. It is only used when `delta_timestamps` is provided. # minus 1e-4 to account for possible numerical error self.tolerance_s = 1 / self.fps - 1e-4 if fps is not None else None self._buffer_capacity = buffer_capacity data_spec = self._make_data_spec(data_spec, buffer_capacity) Path(write_dir).mkdir(parents=True, exist_ok=True) self._data = {} for k, v in data_spec.items(): self._data[k] = _make_memmap_safe( filename=Path(write_dir) / k, dtype=v["dtype"] if v is not None else None, mode="r+" if (Path(write_dir) / k).exists() else "w+", shape=tuple(v["shape"]) if v is not None else None, ) @property def delta_timestamps(self) -> dict[str, np.ndarray] | None: return self._delta_timestamps def set_delta_timestamps(self, value: dict[str, list[float]] | None): """Set delta_timestamps converting the values to numpy arrays. The conversion is for an optimization in the __getitem__. The loop is much slower if the arrays need to be converted into numpy arrays. """ if value is not None: self._delta_timestamps = {k: np.array(v) for k, v in value.items()} else: self._delta_timestamps = None def _make_data_spec(self, data_spec: dict[str, Any], buffer_capacity: int) -> dict[str, dict[str, Any]]: """Makes the data spec for np.memmap.""" if any(k.startswith("_") for k in data_spec): raise ValueError( "data_spec keys should not start with '_'. This prefix is reserved for internal logic." 
    def add_data(self, data: dict[str, np.ndarray]):
        """Add new data to the buffer, which could potentially mean shifting old data out.

        The new data should contain all the frames (in order) of any number of episodes. The indices should
        start from 0 (note to the developer: this can easily be generalized). See the `rollout` and
        `eval_policy` functions in `eval.py` for more information on how the data is constructed.

        Shift the incoming data index and episode_index to continue on from the last frame. Note that this
        will be done in place!
        """
        if len(missing_keys := (set(self.data_keys).difference(set(data)))) > 0:
            raise ValueError(f"Missing data keys: {missing_keys}")
        new_data_length = len(data[self.data_keys[0]])
        if not all(len(data[k]) == new_data_length for k in self.data_keys):
            raise ValueError("All data items should have the same length")

        next_index = self._data[OnlineBuffer.NEXT_INDEX_KEY]

        # Sanity check to make sure that the new data indices start from 0.
        assert data[OnlineBuffer.EPISODE_INDEX_KEY][0].item() == 0
        assert data[OnlineBuffer.INDEX_KEY][0].item() == 0

        # Shift the incoming indices if necessary.
        # NOTE: this mutates the caller's `data` dict in place (see docstring above).
        if self.num_frames > 0:
            last_episode_index = self._data[OnlineBuffer.EPISODE_INDEX_KEY][next_index - 1]
            last_data_index = self._data[OnlineBuffer.INDEX_KEY][next_index - 1]
            data[OnlineBuffer.EPISODE_INDEX_KEY] += last_episode_index + 1
            data[OnlineBuffer.INDEX_KEY] += last_data_index + 1

        # Insert the new data starting from next_index. It may be necessary to wrap around to the start.
        # n_surplus is the number of frames that overflow past the end of the buffer and wrap to index 0.
        n_surplus = max(0, new_data_length - (self._buffer_capacity - next_index))
        for k in self.data_keys:
            if n_surplus == 0:
                # No wrap-around: one contiguous write.
                slc = slice(next_index, next_index + new_data_length)
                self._data[k][slc] = data[k]
                self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][slc] = True
            else:
                # Wrap-around: fill to the end of the buffer, then place the surplus at the start.
                self._data[k][next_index:] = data[k][:-n_surplus]
                self._data[OnlineBuffer.OCCUPANCY_MASK_KEY][next_index:] = True
                self._data[k][:n_surplus] = data[k][-n_surplus:]

        # Advance the write pointer (it becomes the surplus length after a wrap).
        if n_surplus == 0:
            self._data[OnlineBuffer.NEXT_INDEX_KEY] = next_index + new_data_length
        else:
            self._data[OnlineBuffer.NEXT_INDEX_KEY] = n_surplus
np.count_nonzero(self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]) def __len__(self): return self.num_frames def _item_to_tensors(self, item: dict) -> dict: item_ = {} for k, v in item.items(): if isinstance(v, torch.Tensor): item_[k] = v elif isinstance(v, np.ndarray): item_[k] = torch.from_numpy(v) else: item_[k] = torch.tensor(v) return item_ def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: if idx >= len(self) or idx < -len(self): raise IndexError item = {k: v[idx] for k, v in self._data.items() if not k.startswith("_")} if self.delta_timestamps is None: return self._item_to_tensors(item) episode_index = item[OnlineBuffer.EPISODE_INDEX_KEY] current_ts = item[OnlineBuffer.TIMESTAMP_KEY] episode_data_indices = np.where( np.bitwise_and( self._data[OnlineBuffer.EPISODE_INDEX_KEY] == episode_index, self._data[OnlineBuffer.OCCUPANCY_MASK_KEY], ) )[0] episode_timestamps = self._data[OnlineBuffer.TIMESTAMP_KEY][episode_data_indices] for data_key in self.delta_timestamps: # Note: The logic in this loop is copied from `load_previous_and_future_frames`. # Get timestamps used as query to retrieve data of previous/future frames. query_ts = current_ts + self.delta_timestamps[data_key] # Compute distances between each query timestamp and all timestamps of all the frames belonging to # the episode. dist = np.abs(query_ts[:, None] - episode_timestamps[None, :]) argmin_ = np.argmin(dist, axis=1) min_ = dist[np.arange(dist.shape[0]), argmin_] is_pad = min_ > self.tolerance_s # Check violated query timestamps are all outside the episode range. assert ( (query_ts[is_pad] < episode_timestamps[0]) | (episode_timestamps[-1] < query_ts[is_pad]) ).all(), ( f"One or several timestamps unexpectedly violate the tolerance ({min_} > {self.tolerance_s=}" ") inside the episode range." ) # Load frames for this data key. 
item[data_key] = self._data[data_key][episode_data_indices[argmin_]] item[f"{data_key}{OnlineBuffer.IS_PAD_POSTFIX}"] = is_pad return self._item_to_tensors(item) def get_data_by_key(self, key: str) -> torch.Tensor: """Returns all data for a given data key as a Tensor.""" return torch.from_numpy(self._data[key][self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]]) def compute_sampler_weights( offline_dataset: LeRobotDataset, offline_drop_n_last_frames: int = 0, online_dataset: OnlineBuffer | None = None, online_sampling_ratio: float | None = None, online_drop_n_last_frames: int = 0, ) -> torch.Tensor: """Compute the sampling weights for the online training dataloader in train.py. Args: offline_dataset: The LeRobotDataset used for offline pre-training. online_drop_n_last_frames: Number of frames to drop from the end of each offline dataset episode. online_dataset: The OnlineBuffer used in online training. online_sampling_ratio: The proportion of data that should be sampled from the online dataset. If an online dataset is provided, this value must also be provided. online_drop_n_first_frames: See `offline_drop_n_last_frames`. This is the same, but for the online dataset. Returns: Tensor of weights for [offline_dataset; online_dataset], normalized to 1. Notes to maintainers: - This duplicates some logic from EpisodeAwareSampler. We should consider converging to one approach. - When used with `torch.utils.data.WeightedRandomSampler`, it could completely replace `EpisodeAwareSampler` as the online dataset related arguments are optional. The only missing feature is the ability to turn shuffling off. - Options `drop_first_n_frames` and `episode_indices_to_use` can be added easily. They were not included here to avoid adding complexity. 
""" if len(offline_dataset) == 0 and (online_dataset is None or len(online_dataset) == 0): raise ValueError("At least one of `offline_dataset` or `online_dataset` should be contain data.") if (online_dataset is None) ^ (online_sampling_ratio is None): raise ValueError( "`online_dataset` and `online_sampling_ratio` must be provided together or not at all." ) offline_sampling_ratio = 0 if online_sampling_ratio is None else 1 - online_sampling_ratio weights = [] if len(offline_dataset) > 0: offline_data_mask_indices = [] for start_index, end_index in zip( offline_dataset.episode_data_index["from"], offline_dataset.episode_data_index["to"], strict=True, ): offline_data_mask_indices.extend( range(start_index.item(), end_index.item() - offline_drop_n_last_frames) ) offline_data_mask = torch.zeros(len(offline_dataset), dtype=torch.bool) offline_data_mask[torch.tensor(offline_data_mask_indices)] = True weights.append( torch.full( size=(len(offline_dataset),), fill_value=offline_sampling_ratio / offline_data_mask.sum(), ) * offline_data_mask ) if online_dataset is not None and len(online_dataset) > 0: online_data_mask_indices = [] episode_indices = online_dataset.get_data_by_key("episode_index") for episode_idx in torch.unique(episode_indices): where_episode = torch.where(episode_indices == episode_idx) start_index = where_episode[0][0] end_index = where_episode[0][-1] + 1 online_data_mask_indices.extend( range(start_index.item(), end_index.item() - online_drop_n_last_frames) ) online_data_mask = torch.zeros(len(online_dataset), dtype=torch.bool) online_data_mask[torch.tensor(online_data_mask_indices)] = True weights.append( torch.full( size=(len(online_dataset),), fill_value=online_sampling_ratio / online_data_mask.sum(), ) * online_data_mask ) weights = torch.cat(weights) if weights.sum() == 0: weights += 1 / len(weights) else: weights /= weights.sum() return weights
lerobot/src/lerobot/datasets/online_buffer.py/0
{ "file_path": "lerobot/src/lerobot/datasets/online_buffer.py", "repo_id": "lerobot", "token_count": 7375 }
210
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class DeviceNotConnectedError(ConnectionError):
    """Exception raised when the device is not connected."""

    def __init__(self, message="This device is not connected. Try calling `connect()` first."):
        self.message = message
        super().__init__(self.message)


class DeviceAlreadyConnectedError(ConnectionError):
    """Exception raised when the device is already connected."""

    def __init__(
        self,
        message="This device is already connected. Try not calling `connect()` twice.",
    ):
        self.message = message
        super().__init__(self.message)


class InvalidActionError(ValueError):
    """Exception raised when an action is invalid."""

    def __init__(
        self,
        message="The action is invalid. Check the value follows what it is expected from the action space.",
    ):
        self.message = message
        super().__init__(self.message)
lerobot/src/lerobot/errors.py/0
{ "file_path": "lerobot/src/lerobot/errors.py", "repo_id": "lerobot", "token_count": 459 }
211
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import math from dataclasses import asdict, dataclass from pathlib import Path import draccus from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR, LRScheduler from lerobot.constants import SCHEDULER_STATE from lerobot.datasets.utils import write_json from lerobot.utils.io_utils import deserialize_json_into_object @dataclass class LRSchedulerConfig(draccus.ChoiceRegistry, abc.ABC): num_warmup_steps: int @property def type(self) -> str: return self.get_choice_name(self.__class__) @abc.abstractmethod def build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler | None: raise NotImplementedError @LRSchedulerConfig.register_subclass("diffuser") @dataclass class DiffuserSchedulerConfig(LRSchedulerConfig): name: str = "cosine" num_warmup_steps: int | None = None def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: from diffusers.optimization import get_scheduler kwargs = {**asdict(self), "num_training_steps": num_training_steps, "optimizer": optimizer} return get_scheduler(**kwargs) @LRSchedulerConfig.register_subclass("vqbet") @dataclass class VQBeTSchedulerConfig(LRSchedulerConfig): num_warmup_steps: int num_vqvae_training_steps: int num_cycles: float = 0.5 def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: def lr_lambda(current_step): if current_step 
@LRSchedulerConfig.register_subclass("cosine_decay_with_warmup")
@dataclass
class CosineDecayWithWarmupSchedulerConfig(LRSchedulerConfig):
    """Used by Physical Intelligence to train Pi0"""

    # Steps of linear warmup up to the peak learning rate.
    num_warmup_steps: int
    # Steps of cosine decay from peak_lr down to decay_lr; held constant afterwards.
    num_decay_steps: int
    peak_lr: float
    decay_lr: float

    def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
        """Build a LambdaLR implementing linear warmup followed by cosine decay.

        `num_training_steps` is unused: the schedule length is fully determined by
        `num_warmup_steps` and `num_decay_steps`.
        """
        del num_training_steps

        def lr_lambda(current_step):
            def linear_warmup_schedule(current_step):
                # Ramp linearly from 1/(num_warmup_steps + 1) at step 0 up to 1 at end of warmup.
                if current_step <= 0:
                    return 1 / (self.num_warmup_steps + 1)
                frac = 1 - current_step / self.num_warmup_steps
                return (1 / (self.num_warmup_steps + 1) - 1) * frac + 1

            def cosine_decay_schedule(current_step):
                # Cosine from 1 down to decay_lr/peak_lr over num_decay_steps, then constant.
                step = min(current_step, self.num_decay_steps)
                cosine_decay = 0.5 * (1 + math.cos(math.pi * step / self.num_decay_steps))
                alpha = self.decay_lr / self.peak_lr
                decayed = (1 - alpha) * cosine_decay + alpha
                return decayed

            if current_step < self.num_warmup_steps:
                return linear_warmup_schedule(current_step)

            return cosine_decay_schedule(current_step)

        return LambdaLR(optimizer, lr_lambda, -1)


def save_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> None:
    """Serialize the scheduler's state dict to `save_dir / SCHEDULER_STATE` as JSON."""
    state_dict = scheduler.state_dict()
    write_json(state_dict, save_dir / SCHEDULER_STATE)


def load_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> LRScheduler:
    """Restore the scheduler's state from `save_dir / SCHEDULER_STATE` and return it."""
    state_dict = deserialize_json_into_object(save_dir / SCHEDULER_STATE, scheduler.state_dict())
    scheduler.load_state_dict(state_dict)
    return scheduler
lerobot/src/lerobot/optim/schedulers.py/0
{ "file_path": "lerobot/src/lerobot/optim/schedulers.py", "repo_id": "lerobot", "token_count": 1831 }
212
#!/usr/bin/env python # Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ฯ€0: A Vision-Language-Action Flow Model for General Robot Control [Paper](https://www.physicalintelligence.company/download/pi0.pdf) [Jax code](https://github.com/Physical-Intelligence/openpi) Designed by Physical Intelligence. Ported from Jax by Hugging Face. Disclaimer: It is not expected to perform as well as the original implementation. 
def create_sinusoidal_pos_embedding(
    time: torch.Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
) -> Tensor:
    """Computes sine-cosine positional embedding vectors for scalar positions.

    Args:
        time: Scalar positions of shape `(batch_size,)`.
        dimension: Output embedding size; must be even (half sine, half cosine features).
        min_period: Shortest sinusoid period.
        max_period: Longest sinusoid period.
        device: Device to create the embedding on; accepts a `torch.device` or a device string.

    Returns:
        Tensor of shape `(batch_size, dimension)`.

    Raises:
        ValueError: If `dimension` is odd or `time` is not 1-dimensional.
    """
    if dimension % 2 != 0:
        raise ValueError(f"dimension ({dimension}) must be divisible by 2")
    if time.ndim != 1:
        raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
    # Bug fix: the default `device="cpu"` is a plain string, which has no `.type` attribute, so the
    # `get_safe_dtype` call below crashed unless callers always passed a torch.device. Normalizing
    # through `torch.device` accepts both strings and torch.device instances.
    device = torch.device(device)
    dtype = get_safe_dtype(torch.float64, device.type)
    fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
    # Geometric progression of periods from min_period to max_period.
    period = min_period * (max_period / min_period) ** fraction

    # Compute the outer product of angular frequencies and times.
    scaling_factor = 1.0 / period * 2 * math.pi
    sin_input = scaling_factor[None, :] * time[:, None]
    pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
    return pos_emb
"""Can be (batch_size x sequence_length x features_dimension) or (batch_size x features_dimension) """ if vector.shape[-1] == new_dim: return vector shape = list(vector.shape) current_dim = shape[-1] shape[-1] = new_dim new_vector = torch.zeros(*shape, dtype=vector.dtype, device=vector.device) new_vector[..., :current_dim] = vector return new_vector def normalize(x, min_val, max_val): return (x - min_val) / (max_val - min_val) def unnormalize(x, min_val, max_val): return x * (max_val - min_val) + min_val def safe_arcsin(value): # This ensures that the input stays within # [โˆ’1,1] to avoid invalid values for arcsin return torch.arcsin(torch.clamp(value, -1.0, 1.0)) def aloha_gripper_to_angular(value): # Aloha transforms the gripper positions into a linear space. The following code # reverses this transformation to be consistent with pi0 which is pretrained in # angular space. # # These values are coming from the Aloha code: # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED value = unnormalize(value, min_val=0.01844, max_val=0.05800) # This is the inverse of the angular to linear transformation inside the Interbotix code. def linear_to_radian(linear_position, arm_length, horn_radius): value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) return safe_arcsin(value) # The constants are taken from the Interbotix code. value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) # Normalize to [0, 1]. # The values 0.4 and 1.5 were measured on an actual Trossen robot. return normalize(value, min_val=0.4, max_val=1.5) def aloha_gripper_from_angular(value): # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. # Note that the units are still angular but the range is different. # The values 0.4 and 1.5 were measured on an actual Trossen robot. 
value = unnormalize(value, min_val=0.4, max_val=1.5)
# These values are coming from the Aloha code:
# PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
return normalize(value, min_val=-0.6213, max_val=1.4910)


def aloha_gripper_from_angular_inv(value):
    """Map an Aloha angular gripper value back to pi0's range (inverse of gripper_from_angular)."""
    # Directly inverts the gripper_from_angular function.
    value = unnormalize(value, min_val=-0.6213, max_val=1.4910)
    return normalize(value, min_val=0.4, max_val=1.5)


class PI0Policy(PreTrainedPolicy):
    """Wrapper class around PI0FlowMatching model to train and run inference within LeRobot."""

    config_class = PI0Config
    name = "pi0"

    def __init__(
        self,
        config: PI0Config,
        dataset_stats: dict[str, dict[str, Tensor]] | None = None,
    ):
        """
        Args:
            config: Policy configuration class instance or None, in which case the default
                instantiation of the configuration class is used.
            dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
                that they will be passed with a call to `load_state_dict` before the policy is used.
        """
        super().__init__(config)
        config.validate_features()
        self.config = config

        # Normalizers for inputs/targets and an un-normalizer for predicted actions.
        self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
        self.normalize_targets = Normalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )
        self.unnormalize_outputs = Unnormalize(
            config.output_features, config.normalization_mapping, dataset_stats
        )

        # PaliGemma tokenizer for the natural-language task description.
        self.language_tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
        self.model = PI0FlowMatching(config)

        self.reset()

    def reset(self):
        """This should be called whenever the environment is reset."""
        # Queue of single-step actions produced by one chunked model call.
        self._action_queue = deque([], maxlen=self.config.n_action_steps)

    @classmethod
    def _transform_state_dict_keys(cls, state_dict: dict) -> dict:
        """
        Transform state dict keys to match expected model structure.

        Transformations:
        - model.paligemma_with_expert.paligemma.language_model.lm_head -> model.paligemma_with_expert.paligemma.lm_head
        - model.paligemma_with_expert.paligemma.language_model.model -> model.paligemma_with_expert.paligemma.model.language_model
        - model.paligemma_with_expert.paligemma.vision_tower -> model.paligemma_with_expert.paligemma.model.vision_tower
        - model.paligemma_with_expert.paligemma.multi_modal_projector -> model.paligemma_with_expert.paligemma.model.multi_modal_projector

        Also handles tied weights between lm_head.weight and embed_tokens.weight.
        """
        import re

        transformed_dict = {}

        # (compiled pattern, replacement) pairs applied in order to every key.
        transformations = [
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.language_model\.lm_head"),
                ".paligemma_with_expert.paligemma.lm_head",
            ),
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.language_model\.model"),
                ".paligemma_with_expert.paligemma.model.language_model",
            ),
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.vision_tower"),
                ".paligemma_with_expert.paligemma.model.vision_tower",
            ),
            (
                re.compile(r"\.paligemma_with_expert\.paligemma\.multi_modal_projector"),
                ".paligemma_with_expert.paligemma.model.multi_modal_projector",
            ),
        ]

        for key, value in state_dict.items():
            new_key = key
            for pattern, replacement in transformations:
                new_key = pattern.sub(replacement, new_key)
            transformed_dict[new_key] = value

        # Handle tied weights: lm_head.weight and embed_tokens.weight share memory
        lm_head_key = None
        embed_tokens_key = None
        for key in transformed_dict:
            if key.endswith(".paligemma_with_expert.paligemma.lm_head.weight"):
                lm_head_key = key
            elif key.endswith(".paligemma_with_expert.paligemma.model.language_model.embed_tokens.weight"):
                embed_tokens_key = key
            if lm_head_key and embed_tokens_key:
                break

        # If only one of the tied tensors is present, synthesize the other from it.
        if lm_head_key and not embed_tokens_key:
            embed_tokens_key = lm_head_key.replace(
                ".lm_head.weight", ".model.language_model.embed_tokens.weight"
            )
            transformed_dict[embed_tokens_key] = transformed_dict[lm_head_key]
        elif embed_tokens_key and not lm_head_key:
            lm_head_key = embed_tokens_key.replace(
                ".model.language_model.embed_tokens.weight", ".lm_head.weight"
            )
            transformed_dict[lm_head_key] = transformed_dict[embed_tokens_key]

        return transformed_dict

    @classmethod
    def _load_as_safetensor(
        cls, model: "PI0Policy", model_file: str, map_location: str, strict: bool
    ) -> "PI0Policy":
        """Override to apply key transformations before loading."""
        from safetensors.torch import load_file

        init_logging()

        # Load the state dict from file safely
        state_dict = load_file(model_file, device=map_location)

        # Apply key transformations
        transformed_state_dict = cls._transform_state_dict_keys(state_dict)

        # Load the transformed state dict
        msg = model.load_state_dict(transformed_state_dict, strict=strict)

        # Log message
        log_model_loading_keys(msg.missing_keys, msg.unexpected_keys)

        return model

    def get_optim_params(self) -> dict:
        """Return the parameters to optimize (all model parameters)."""
        return self.parameters()

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """Override the from_pretrained method to display important disclaimer."""
        print(
            "⚠️ DISCLAIMER: The PI0 model is ported from JAX by the Hugging Face team. \n"
            " It is not expected to perform as well as the original implementation. \n"
            " Original implementation: https://github.com/Physical-Intelligence/openpi"
        )
        return super().from_pretrained(*args, **kwargs)

    @torch.no_grad()
    def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
        """Predict a chunk of actions given environment observations."""
        raise NotImplementedError("Currently not implemented for PI0")

    @torch.no_grad()
    def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
        """Select a single action given environment observations.

        This method wraps `select_actions` in order to return one action at a time for execution
        in the environment. It works by managing the actions in a queue and only calling
        `select_actions` when the queue is empty.
        """
        self.eval()

        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])

        batch = self.normalize_inputs(batch)

        # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
        # querying the policy.
        if len(self._action_queue) == 0:
            images, img_masks = self.prepare_images(batch)
            state = self.prepare_state(batch)
            lang_tokens, lang_masks = self.prepare_language(batch)

            actions = self.model.sample_actions(
                images, img_masks, lang_tokens, lang_masks, state, noise=noise
            )

            # Unpad actions: keep only the robot's real action dimensions.
            original_action_dim = self.config.action_feature.shape[0]
            actions = actions[:, :, :original_action_dim]

            actions = self.unnormalize_outputs({"action": actions})["action"]

            if self.config.adapt_to_pi_aloha:
                actions = self._pi_aloha_encode_actions(actions)

            # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
            # effectively has shape (n_action_steps, batch_size, *), hence the transpose.
            self._action_queue.extend(actions.transpose(0, 1))
        return self._action_queue.popleft()

    def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> tuple[Tensor, dict[str, Tensor]]:
        """Do a full training forward pass to compute the loss"""
        if self.config.adapt_to_pi_aloha:
            batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE])
            batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])

        batch = self.normalize_inputs(batch)
        batch = self.normalize_targets(batch)

        images, img_masks = self.prepare_images(batch)
        state = self.prepare_state(batch)
        lang_tokens, lang_masks = self.prepare_language(batch)
        actions = self.prepare_action(batch)
        actions_is_pad = batch.get("action_is_pad")

        loss_dict = {}
        losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
        loss_dict["losses_after_forward"] = losses.clone()

        if actions_is_pad is not None:
            # Zero out the loss on timesteps that fall outside the episode.
            in_episode_bound = ~actions_is_pad
            losses = losses * in_episode_bound.unsqueeze(-1)
            loss_dict["losses_after_in_ep_bound"] = losses.clone()

        # Remove padding
        losses = losses[:, :, : self.config.max_action_dim]
        loss_dict["losses_after_rm_padding"] = losses.clone()

        # For backward pass
        loss = losses.mean()
        # For logging
        loss_dict["l2_loss"] = loss.item()

        return loss, loss_dict

    def prepare_images(self, batch):
        """Apply Pi0 preprocessing to the images, like resizing to 224x224 and padding to keep aspect ratio, and
        convert pixel range from [0.0, 1.0] to [-1.0, 1.0] as requested by SigLIP.
        """
        images = []
        img_masks = []

        present_img_keys = [key for key in self.config.image_features if key in batch]
        missing_img_keys = [key for key in self.config.image_features if key not in batch]

        if len(present_img_keys) == 0:
            raise ValueError(
                f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})"
            )

        # Preprocess image features present in the batch
        for key in present_img_keys:
            img = batch[key]

            if self.config.resize_imgs_with_padding is not None:
                img = resize_with_pad(img, *self.config.resize_imgs_with_padding, pad_value=0)

            # Normalize from range [0,1] to [-1,1] as expected by siglip
            img = img * 2.0 - 1.0

            bsize = img.shape[0]
            device = img.device
            mask = torch.ones(bsize, dtype=torch.bool, device=device)
            images.append(img)
            img_masks.append(mask)

        # Create image features not present in the batch
        # as fully 0 padded images.
        # NOTE(review): `img` and `mask` here reuse the last values from the loop above; safe
        # because the earlier ValueError guarantees at least one camera is present.
        for num_empty_cameras in range(len(missing_img_keys)):
            if num_empty_cameras >= self.config.empty_cameras:
                break
            img = torch.ones_like(img) * -1
            mask = torch.zeros_like(mask)
            images.append(img)
            img_masks.append(mask)

        return images, img_masks

    def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
        """Tokenize the text input"""
        device = batch[OBS_STATE].device
        tasks = batch["task"]

        # PaliGemma prompt has to end with a new line
        tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]

        tokenized_prompt = self.language_tokenizer.__call__(
            tasks,
            padding="max_length",
            padding_side="right",
            max_length=self.config.tokenizer_max_length,
            return_tensors="pt",
        )
        lang_tokens = tokenized_prompt["input_ids"].to(device=device)
        lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)

        return lang_tokens, lang_masks

    def _pi_aloha_decode_state(self, state):
        """Convert an Aloha runtime state into the convention pi0 was trained with (in place)."""
        # Flip the joints.
        for motor_idx in [1, 2, 8, 9]:
            state[:, motor_idx] *= -1
        # Reverse the gripper transformation that is being applied by the Aloha runtime.
        for motor_idx in [6, 13]:
            state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx])
        return state

    def _pi_aloha_encode_actions(self, actions):
        """Convert pi0 actions into the Aloha runtime convention (in place)."""
        # Flip the joints.
        for motor_idx in [1, 2, 8, 9]:
            actions[:, :, motor_idx] *= -1
        # Reverse the gripper transformation that is being applied by the Aloha runtime.
        for motor_idx in [6, 13]:
            actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx])
        return actions

    def _pi_aloha_encode_actions_inv(self, actions):
        """Inverse of `_pi_aloha_encode_actions`, applied to training targets (in place)."""
        # Flip the joints again.
        for motor_idx in [1, 2, 8, 9]:
            actions[:, :, motor_idx] *= -1
        # Reverse the gripper transformation that is being applied by the Aloha runtime.
        for motor_idx in [6, 13]:
            actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx])
        return actions

    def prepare_state(self, batch):
        """Pad state"""
        state = pad_vector(batch[OBS_STATE], self.config.max_state_dim)
        return state

    def prepare_action(self, batch):
        """Pad action"""
        actions = pad_vector(batch[ACTION], self.config.max_action_dim)
        return actions


class PI0FlowMatching(nn.Module):
    """
    π0: A Vision-Language-Action Flow Model for General Robot Control

    [Paper](https://www.physicalintelligence.company/download/pi0.pdf)
    [Jax code](https://github.com/Physical-Intelligence/openpi)

    Designed by Physical Intelligence. Ported from Jax by Hugging Face.

    A PaliGemma VLM encodes the images and language prompt into a KV cache; a
    smaller Gemma "expert" attends to that cache together with the robot state,
    noise, and a flow-matching timestep to predict action chunks:

    ┌──────────────────────────────┐
    │               actions        │
    │               ▲              │
    │              ┌┴─────┐        │
    │  kv cache    │Gemma │        │
    │  ┌──────────►│Expert│        │
    │  │           │      │        │
    │ ┌┴────────┐  │x 10  │        │
    │ │         │  └▲──▲──┘        │
    │ │PaliGemma│   │  │           │
    │ │         │   │  robot state │
    │ │         │   noise          │
    │ └▲──▲─────┘                  │
    │  │  │                        │
    │  │  image(s)                 │
    │  language tokens             │
    └──────────────────────────────┘
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        paligemma_with_export_config = PaliGemmaWithExpertConfig(
            freeze_vision_encoder=self.config.freeze_vision_encoder,
            train_expert_only=self.config.train_expert_only,
            attention_implementation=self.config.attention_implementation,
        )
        self.paligemma_with_expert = PaliGemmaWithExpertModel(paligemma_with_export_config)

        # Projections are float32
        self.state_proj = nn.Linear(self.config.max_state_dim, self.config.proj_width)
        self.action_in_proj = nn.Linear(self.config.max_action_dim, self.config.proj_width)
        self.action_out_proj = nn.Linear(self.config.proj_width, self.config.max_action_dim)
self.action_time_mlp_in = nn.Linear(self.config.proj_width * 2, self.config.proj_width)
        self.action_time_mlp_out = nn.Linear(self.config.proj_width, self.config.proj_width)

        self.set_requires_grad()

    def set_requires_grad(self):
        """Enable/disable gradients on the state projection per the config flag."""
        for params in self.state_proj.parameters():
            params.requires_grad = self.config.train_state_proj

    def sample_noise(self, shape, device):
        """Sample standard Gaussian noise (float32) used as the flow-matching start point."""
        noise = torch.normal(
            mean=0.0,
            std=1.0,
            size=shape,
            dtype=torch.float32,
            device=device,
        )
        return noise

    def sample_time(self, bsize, device):
        """Sample flow-matching timesteps from Beta(1.5, 1), shifted into [0.001, 1.0]."""
        beta_dist = torch.distributions.Beta(concentration1=1.5, concentration0=1.0)
        time_beta = beta_dist.sample((bsize,)).to(device=device, dtype=torch.float32)
        time = time_beta * 0.999 + 0.001
        return time

    def embed_prefix(
        self, images, img_masks, lang_tokens, lang_masks
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Embed images with SigLIP and language tokens with embedding layer to prepare
        for PaliGemma transformer processing.

        Returns (embeddings, padding masks, attention-structure masks) for the prefix.
        """
        # TODO: avoid list in python and torch.cat ; prefer pre-allocation with torch.empty
        embs = []
        pad_masks = []
        att_masks = []

        # TODO: remove for loop
        for (
            img,
            img_mask,
        ) in zip(images, img_masks, strict=False):
            img_emb = self.paligemma_with_expert.embed_image(img)
            img_emb = img_emb.to(dtype=torch.bfloat16)

            # Normalize image embeddings (scale by sqrt of the embedding dimension)
            img_emb_dim = img_emb.shape[-1]
            img_emb = img_emb * torch.tensor(img_emb_dim**0.5, dtype=img_emb.dtype, device=img_emb.device)

            bsize, num_img_embs = img_emb.shape[:2]
            img_mask = img_mask[:, None].expand(bsize, num_img_embs)

            embs.append(img_emb)
            pad_masks.append(img_mask)

            # Create attention masks so that image tokens attend to each other
            att_masks += [0] * num_img_embs

        lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)

        # Normalize language embeddings
        lang_emb_dim = lang_emb.shape[-1]
        lang_emb = lang_emb * math.sqrt(lang_emb_dim)

        embs.append(lang_emb)
        pad_masks.append(lang_masks)

        # full attention between image and language inputs
        num_lang_embs = lang_emb.shape[1]
        att_masks += [0] * num_lang_embs

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks

    def embed_suffix(self, state, noisy_actions, timestep):
        """Embed state, noisy_actions, timestep to prepare for Expert Gemma processing."""
        embs = []
        pad_masks = []
        att_masks = []

        # Embed state
        state_emb = self.state_proj(state)
        state_emb = state_emb.to(dtype=torch.bfloat16)
        embs.append(state_emb[:, None, :])
        bsize = state_emb.shape[0]
        dtype = state_emb.dtype
        device = state_emb.device

        state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
        pad_masks.append(state_mask)

        # Set attention masks so that image and language inputs do not attend to state or actions
        att_masks += [1]

        # Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
        time_emb = create_sinusoidal_pos_embedding(
            timestep, self.config.proj_width, min_period=4e-3, max_period=4.0, device=device
        )
        time_emb = time_emb.type(dtype=dtype)

        # Fuse timestep + action information using an MLP
        action_emb = self.action_in_proj(noisy_actions)

        time_emb = time_emb[:, None, :].expand_as(action_emb)
        action_time_emb = torch.cat([action_emb, time_emb], dim=2)

        action_time_emb = self.action_time_mlp_in(action_time_emb)
        action_time_emb = F.silu(action_time_emb)  # swish == silu
        action_time_emb = self.action_time_mlp_out(action_time_emb)

        # Add to input tokens
        embs.append(action_time_emb)

        bsize, action_time_dim = action_time_emb.shape[:2]
        action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
        pad_masks.append(action_time_mask)

        # Set attention masks so that image, language and state inputs do not attend to action tokens
        att_masks += [1] + ([0] * (self.config.n_action_steps - 1))

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        # NOTE(review): unlike embed_prefix, this mask is built with the embedding dtype
        # (float) rather than bool; make_att_2d_masks only cumsums/compares, so both work.
        att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks

    def forward(
        self, images, img_masks, lang_tokens, lang_masks, state, actions, noise=None, time=None
    ) -> Tensor:
        """Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
        if noise is None:
            noise = self.sample_noise(actions.shape, actions.device)

        if time is None:
            time = self.sample_time(actions.shape[0], actions.device)

        # Flow-matching interpolation: x_t between noise (t=1) and actions (t=0);
        # the regression target u_t is the constant velocity from actions to noise.
        time_expanded = time[:, None, None]
        x_t = time_expanded * noise + (1 - time_expanded) * actions
        u_t = noise - actions

        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
            images, img_masks, lang_tokens, lang_masks
        )
        suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(state, x_t, time)

        pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
        att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)

        att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
        position_ids = torch.cumsum(pad_masks, dim=1) - 1

        (_, suffix_out), _ = self.paligemma_with_expert.forward(
            attention_mask=att_2d_masks,
            position_ids=position_ids,
            past_key_values=None,
            inputs_embeds=[prefix_embs, suffix_embs],
            use_cache=False,
            fill_kv_cache=False,
        )
        suffix_out = suffix_out[:, -self.config.n_action_steps :]
        # Original openpi code, upcast attention output
        suffix_out = suffix_out.to(dtype=torch.float32)
        v_t = self.action_out_proj(suffix_out)

        losses = F.mse_loss(u_t, v_t, reduction="none")
        return losses

    def sample_actions(self, images, img_masks, lang_tokens, lang_masks, state, noise=None) -> Tensor:
        """Do a full inference forward and compute the action (batch_size x num_steps x num_motors)"""
        bsize = state.shape[0]
        device = state.device

        if noise is None:
            actions_shape = (bsize, self.config.n_action_steps, self.config.max_action_dim)
            noise = self.sample_noise(actions_shape, device)

        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
            images, img_masks, lang_tokens, lang_masks
        )
        prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
        prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1

        # Compute image and language key value cache
        _, past_key_values = self.paligemma_with_expert.forward(
            attention_mask=prefix_att_2d_masks,
            position_ids=prefix_position_ids,
            past_key_values=None,
            inputs_embeds=[prefix_embs, None],
            use_cache=self.config.use_cache,
            fill_kv_cache=True,
        )

        # Euler integration from t=1 (noise) down to t=0 (actions) in num_steps steps.
        dt = -1.0 / self.config.num_steps
        dt = torch.tensor(dt, dtype=torch.float32, device=device)

        # NOTE(review): x_t aliases `noise` and is updated in place below, so a
        # caller-provided noise tensor is mutated — confirm callers don't reuse it.
        x_t = noise
        time = torch.tensor(1.0, dtype=torch.float32, device=device)
        while time >= -dt / 2:
            expanded_time = time.expand(bsize)
            v_t = self.denoise_step(
                state,
                prefix_pad_masks,
                past_key_values,
                x_t,
                expanded_time,
            )

            # Euler step
            x_t += dt * v_t
            time += dt
        return x_t

    def denoise_step(
        self,
        state,
        prefix_pad_masks,
        past_key_values,
        x_t,
        timestep,
    ):
        """Apply one denoising step of the noise `x_t` at a given timestep."""
        suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(state, x_t, timestep)

        suffix_len = suffix_pad_masks.shape[1]
        batch_size = prefix_pad_masks.shape[0]
        prefix_len = prefix_pad_masks.shape[1]
        # Suffix tokens may attend to every non-padded prefix token (served from the KV cache).
        prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)

        suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)

        full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)

        # Continue position ids where the prefix left off.
        prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
        position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1

        outputs_embeds, _ = self.paligemma_with_expert.forward(
            attention_mask=full_att_2d_masks,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=[None, suffix_embs],
            use_cache=self.config.use_cache,
            fill_kv_cache=False,
        )
        suffix_out = outputs_embeds[1]
        suffix_out = suffix_out[:, -self.config.n_action_steps :]
        suffix_out = suffix_out.to(dtype=torch.float32)
        v_t = self.action_out_proj(suffix_out)
        return v_t
lerobot/src/lerobot/policies/pi0/modeling_pi0.py/0
{ "file_path": "lerobot/src/lerobot/policies/pi0/modeling_pi0.py", "repo_id": "lerobot", "token_count": 14362 }
213
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import deque import torch from torch import nn def populate_queues( queues: dict[str, deque], batch: dict[str, torch.Tensor], exclude_keys: list[str] | None = None ): if exclude_keys is None: exclude_keys = [] for key in batch: # Ignore keys not in the queues already (leaving the responsibility to the caller to make sure the # queues have the keys they want). if key not in queues or key in exclude_keys: continue if len(queues[key]) != queues[key].maxlen: # initialize by copying the first observation several times until the queue is full while len(queues[key]) != queues[key].maxlen: queues[key].append(batch[key]) else: # add latest observation to the queue queues[key].append(batch[key]) return queues def get_device_from_parameters(module: nn.Module) -> torch.device: """Get a module's device by checking one of its parameters. Note: assumes that all parameters have the same device """ return next(iter(module.parameters())).device def get_dtype_from_parameters(module: nn.Module) -> torch.dtype: """Get a module's parameter dtype by checking one of its parameters. Note: assumes that all parameters have the same dtype. """ return next(iter(module.parameters())).dtype def get_output_shape(module: nn.Module, input_shape: tuple) -> tuple: """ Calculates the output shape of a PyTorch module given an input shape. 
Args: module (nn.Module): a PyTorch module input_shape (tuple): A tuple representing the input shape, e.g., (batch_size, channels, height, width) Returns: tuple: The output shape of the module. """ dummy_input = torch.zeros(size=input_shape) with torch.inference_mode(): output = module(dummy_input) return tuple(output.shape) def log_model_loading_keys(missing_keys: list[str], unexpected_keys: list[str]) -> None: """Log missing and unexpected keys when loading a model. Args: missing_keys (list[str]): Keys that were expected but not found. unexpected_keys (list[str]): Keys that were found but not expected. """ if missing_keys: logging.warning(f"Missing key(s) when loading model: {missing_keys}") if unexpected_keys: logging.warning(f"Unexpected key(s) when loading model: {unexpected_keys}")
lerobot/src/lerobot/policies/utils.py/0
{ "file_path": "lerobot/src/lerobot/policies/utils.py", "repo_id": "lerobot", "token_count": 1049 }
214
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.cameras import CameraConfig from ..config import RobotConfig @RobotConfig.register_subclass("bi_so100_follower") @dataclass class BiSO100FollowerConfig(RobotConfig): left_arm_port: str right_arm_port: str # Optional left_arm_disable_torque_on_disconnect: bool = True left_arm_max_relative_target: int | None = None left_arm_use_degrees: bool = False right_arm_disable_torque_on_disconnect: bool = True right_arm_max_relative_target: int | None = None right_arm_use_degrees: bool = False # cameras (shared between both arms) cameras: dict[str, CameraConfig] = field(default_factory=dict)
lerobot/src/lerobot/robots/bi_so100_follower/config_bi_so100_follower.py/0
{ "file_path": "lerobot/src/lerobot/robots/bi_so100_follower/config_bi_so100_follower.py", "repo_id": "lerobot", "token_count": 415 }
215
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import json
import logging
import time

import cv2
import zmq

from .config_lekiwi import LeKiwiConfig, LeKiwiHostConfig
from .lekiwi import LeKiwi


class LeKiwiHost:
    """ZMQ endpoints of the on-robot host: pulls commands, pushes observations."""

    def __init__(self, config: LeKiwiHostConfig):
        self.zmq_context = zmq.Context()

        # PULL socket for incoming commands; CONFLATE keeps only the newest message
        # so the robot always acts on the latest command.
        self.zmq_cmd_socket = self.zmq_context.socket(zmq.PULL)
        self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1)
        self.zmq_cmd_socket.bind(f"tcp://*:{config.port_zmq_cmd}")

        # PUSH socket for outgoing observations, also conflated to the latest one.
        self.zmq_observation_socket = self.zmq_context.socket(zmq.PUSH)
        self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1)
        self.zmq_observation_socket.bind(f"tcp://*:{config.port_zmq_observations}")

        self.connection_time_s = config.connection_time_s
        self.watchdog_timeout_ms = config.watchdog_timeout_ms
        self.max_loop_freq_hz = config.max_loop_freq_hz

    def disconnect(self):
        """Close both sockets and terminate the ZMQ context."""
        self.zmq_observation_socket.close()
        self.zmq_cmd_socket.close()
        self.zmq_context.term()


def main():
    """Run the host loop: receive commands, drive the robot, and stream observations.

    The loop runs for `connection_time_s`, throttled to `max_loop_freq_hz`. If no
    command arrives within `watchdog_timeout_ms`, the mobile base is stopped.
    """
    logging.info("Configuring LeKiwi")
    robot_config = LeKiwiConfig()
    robot = LeKiwi(robot_config)

    logging.info("Connecting LeKiwi")
    robot.connect()

    logging.info("Starting HostAgent")
    host_config = LeKiwiHostConfig()
    host = LeKiwiHost(host_config)

    last_cmd_time = time.time()
    watchdog_active = False
    logging.info("Waiting for commands...")

    try:
        # Business logic
        start = time.perf_counter()
        duration = 0
        while duration < host.connection_time_s:
            loop_start_time = time.time()
            try:
                msg = host.zmq_cmd_socket.recv_string(zmq.NOBLOCK)
                data = dict(json.loads(msg))
                robot.send_action(data)
                last_cmd_time = time.time()
                watchdog_active = False
            except zmq.Again:
                # No new command this cycle; only warn on the first miss.
                if not watchdog_active:
                    logging.warning("No command available")
            except Exception as e:
                logging.error("Message fetching failed: %s", e)

            # Watchdog: stop the base once commands have been silent for too long.
            now = time.time()
            if (now - last_cmd_time > host.watchdog_timeout_ms / 1000) and not watchdog_active:
                logging.warning(
                    f"Command not received for more than {host.watchdog_timeout_ms} milliseconds. Stopping the base."
                )
                watchdog_active = True
                robot.stop_base()

            last_observation = robot.get_observation()

            # Encode ndarrays to base64 strings so the observation is JSON-serializable.
            for cam_key in robot.cameras:
                ret, buffer = cv2.imencode(
                    ".jpg", last_observation[cam_key], [int(cv2.IMWRITE_JPEG_QUALITY), 90]
                )
                if ret:
                    last_observation[cam_key] = base64.b64encode(buffer).decode("utf-8")
                else:
                    last_observation[cam_key] = ""

            # Send the observation to the remote agent (best effort, non-blocking).
            try:
                host.zmq_observation_socket.send_string(json.dumps(last_observation), flags=zmq.NOBLOCK)
            except zmq.Again:
                logging.info("Dropping observation, no client connected")

            # Ensure a short sleep to avoid overloading the CPU.
            elapsed = time.time() - loop_start_time
            time.sleep(max(1 / host.max_loop_freq_hz - elapsed, 0))
            duration = time.perf_counter() - start
        print("Cycle time reached.")

    except KeyboardInterrupt:
        print("Keyboard interrupt received. Exiting...")
    finally:
        print("Shutting down Lekiwi Host.")
        robot.disconnect()
        host.disconnect()

    logging.info("Finished LeKiwi cleanly")


if __name__ == "__main__":
    main()
lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py/0
{ "file_path": "lerobot/src/lerobot/robots/lekiwi/lekiwi_host.py", "repo_id": "lerobot", "token_count": 1993 }
216
This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot. ## Setup Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/2.0/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras to your computer. ## Install LeRobot On your computer: 1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install): ```bash mkdir -p ~/miniconda3 wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 rm ~/miniconda3/miniconda.sh ~/miniconda3/bin/conda init bash ``` 2. Restart shell or `source ~/.bashrc` 3. Create and activate a fresh conda environment for lerobot ```bash conda create -y -n lerobot python=3.10 && conda activate lerobot ``` 4. Clone LeRobot: ```bash git clone https://github.com/huggingface/lerobot.git ~/lerobot ``` 5. When using `miniconda`, install `ffmpeg` in your environment: ```bash conda install ffmpeg -c conda-forge ``` 6. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense): ```bash cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]" ``` ## Teleoperate \*\*/!\ FOR SAFETY, READ THIS /!\*\* Teleoperation consists in manually operating the leader arms to move the follower arms. Importantly: 1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms, 2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics. 
By running the following code, you can start your first **SAFE** teleoperation: > **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`. ```bash python lerobot/scripts/control_robot.py \ --robot.type=aloha \ --robot.max_relative_target=5 \ --control.type=teleoperate ``` By adding `--robot.max_relative_target=5`, we override the default value for `max_relative_target` defined in [`ViperXConfig`](./config_viperx.py). It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot.max_relative_target=null` to the command line: ```bash python lerobot/scripts/control_robot.py \ --robot.type=aloha \ --robot.max_relative_target=null \ --control.type=teleoperate ``` ## Record a dataset Once you're familiar with teleoperation, you can record your first dataset with Aloha. If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens): ```bash huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential ``` Store your Hugging Face repository name in a variable to run these commands: ```bash HF_USER=$(huggingface-cli whoami | head -n 1) echo $HF_USER ``` Record 2 episodes and upload your dataset to the hub: ```bash python lerobot/scripts/control_robot.py \ --robot.type=aloha \ --robot.max_relative_target=null \ --control.type=record \ --control.fps=30 \ --control.single_task="Grasp a lego block and put it in the bin." 
\ --control.repo_id=${HF_USER}/aloha_test \ --control.tags='["tutorial"]' \ --control.warmup_time_s=5 \ --control.episode_time_s=30 \ --control.reset_time_s=30 \ --control.num_episodes=2 \ --control.push_to_hub=true ``` ## Visualize a dataset If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: ```bash echo ${HF_USER}/aloha_test ``` If you didn't upload with `--control.push_to_hub=false`, you can also visualize it locally with: ```bash python -m lerobot.scripts.visualize_dataset_html \ --repo-id ${HF_USER}/aloha_test ``` ## Replay an episode \*\*/!\ FOR SAFETY, READ THIS /!\*\* Replay consists in automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot.max_relative_target=5` to your command line as explained above. Now try to replay the first episode on your robot: ```bash python lerobot/scripts/control_robot.py \ --robot.type=aloha \ --robot.max_relative_target=null \ --control.type=replay \ --control.fps=30 \ --control.repo_id=${HF_USER}/aloha_test \ --control.episode=0 ``` ## Train a policy To train a policy to control your robot, use the [`lerobot-train`](../src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash lerobot-train \ --dataset.repo_id=${HF_USER}/aloha_test \ --policy.type=act \ --output_dir=outputs/train/act_aloha_test \ --job_name=act_aloha_test \ --policy.device=cuda \ --wandb.enable=true ``` Let's explain it: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`. 2. We provided the policy with `policy.type=act`. 
This loads configurations from [`configuration_act.py`](../src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 3. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md) Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`. ## Evaluate your policy You can use the `record` function from [`lerobot/scripts/control_robot.py`](../src/lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: ```bash python lerobot/scripts/control_robot.py \ --robot.type=aloha \ --control.type=record \ --control.fps=30 \ --control.single_task="Grasp a lego block and put it in the bin." \ --control.repo_id=${HF_USER}/eval_act_aloha_test \ --control.tags='["tutorial"]' \ --control.warmup_time_s=5 \ --control.episode_time_s=30 \ --control.reset_time_s=30 \ --control.num_episodes=10 \ --control.push_to_hub=true \ --control.policy.path=outputs/train/act_aloha_test/checkpoints/last/pretrained_model \ --control.num_image_writer_processes=1 ``` As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: 1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. 
`outputs/train/act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`). 2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_aloha_test`). 3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows reaching a constant 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`. ## More Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation. If you have any question or need help, please reach out on Discord in the channel `#aloha-arm`.
lerobot/src/lerobot/robots/viperx/README.md/0
{ "file_path": "lerobot/src/lerobot/robots/viperx/README.md", "repo_id": "lerobot", "token_count": 2779 }
217
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example: ```shell python src/lerobot/scripts/server/policy_server.py \ --host=127.0.0.1 \ --port=8080 \ --fps=30 \ --inference_latency=0.033 \ --obs_queue_timeout=1 ``` """ import logging import pickle # nosec import threading import time from concurrent import futures from dataclasses import asdict from pprint import pformat from queue import Empty, Queue import draccus import grpc import torch from lerobot.policies.factory import get_policy_class from lerobot.scripts.server.configs import PolicyServerConfig from lerobot.scripts.server.constants import SUPPORTED_POLICIES from lerobot.scripts.server.helpers import ( FPSTracker, Observation, RemotePolicyConfig, TimedAction, TimedObservation, get_logger, observations_similar, raw_observation_to_observation, ) from lerobot.transport import ( services_pb2, # type: ignore services_pb2_grpc, # type: ignore ) from lerobot.transport.utils import receive_bytes_in_chunks class PolicyServer(services_pb2_grpc.AsyncInferenceServicer): prefix = "policy_server" logger = get_logger(prefix) def __init__(self, config: PolicyServerConfig): self.config = config self.shutdown_event = threading.Event() # FPS measurement self.fps_tracker = FPSTracker(target_fps=config.fps) self.observation_queue = Queue(maxsize=1) self._predicted_timesteps_lock = threading.Lock() self._predicted_timesteps = set() self.last_processed_obs = None # 
Attributes will be set by SendPolicyInstructions self.device = None self.policy_type = None self.lerobot_features = None self.actions_per_chunk = None self.policy = None @property def running(self): return not self.shutdown_event.is_set() @property def policy_image_features(self): return self.policy.config.image_features def _reset_server(self) -> None: """Flushes server state when new client connects.""" # only running inference on the latest observation received by the server self.shutdown_event.set() self.observation_queue = Queue(maxsize=1) with self._predicted_timesteps_lock: self._predicted_timesteps = set() def Ready(self, request, context): # noqa: N802 client_id = context.peer() self.logger.info(f"Client {client_id} connected and ready") self._reset_server() self.shutdown_event.clear() return services_pb2.Empty() def SendPolicyInstructions(self, request, context): # noqa: N802 """Receive policy instructions from the robot client""" if not self.running: self.logger.warning("Server is not running. Ignoring policy instructions.") return services_pb2.Empty() client_id = context.peer() policy_specs = pickle.loads(request.data) # nosec if not isinstance(policy_specs, RemotePolicyConfig): raise TypeError(f"Policy specs must be a RemotePolicyConfig. Got {type(policy_specs)}") if policy_specs.policy_type not in SUPPORTED_POLICIES: raise ValueError( f"Policy type {policy_specs.policy_type} not supported. " f"Supported policies: {SUPPORTED_POLICIES}" ) self.logger.info( f"Receiving policy instructions from {client_id} | " f"Policy type: {policy_specs.policy_type} | " f"Pretrained name or path: {policy_specs.pretrained_name_or_path} | " f"Actions per chunk: {policy_specs.actions_per_chunk} | " f"Device: {policy_specs.device}" ) self.device = policy_specs.device self.policy_type = policy_specs.policy_type # act, pi0, etc. 
self.lerobot_features = policy_specs.lerobot_features self.actions_per_chunk = policy_specs.actions_per_chunk policy_class = get_policy_class(self.policy_type) start = time.perf_counter() self.policy = policy_class.from_pretrained(policy_specs.pretrained_name_or_path) self.policy.to(self.device) end = time.perf_counter() self.logger.info(f"Time taken to put policy on {self.device}: {end - start:.4f} seconds") return services_pb2.Empty() def SendObservations(self, request_iterator, context): # noqa: N802 """Receive observations from the robot client""" client_id = context.peer() self.logger.debug(f"Receiving observations from {client_id}") receive_time = time.time() # comparing timestamps so need time.time() start_deserialize = time.perf_counter() received_bytes = receive_bytes_in_chunks( request_iterator, None, self.shutdown_event, self.logger ) # blocking call while looping over request_iterator timed_observation = pickle.loads(received_bytes) # nosec deserialize_time = time.perf_counter() - start_deserialize self.logger.debug(f"Received observation #{timed_observation.get_timestep()}") obs_timestep = timed_observation.get_timestep() obs_timestamp = timed_observation.get_timestamp() # Calculate FPS metrics fps_metrics = self.fps_tracker.calculate_fps_metrics(obs_timestamp) self.logger.info( f"Received observation #{obs_timestep} | " f"Avg FPS: {fps_metrics['avg_fps']:.2f} | " # fps at which observations are received from client f"Target: {fps_metrics['target_fps']:.2f} | " f"One-way latency: {(receive_time - obs_timestamp) * 1000:.2f}ms" ) self.logger.debug( f"Server timestamp: {receive_time:.6f} | " f"Client timestamp: {obs_timestamp:.6f} | " f"Deserialization time: {deserialize_time:.6f}s" ) if not self._enqueue_observation( timed_observation # wrapping a RawObservation ): self.logger.info(f"Observation #{obs_timestep} has been filtered out") return services_pb2.Empty() def GetActions(self, request, context): # noqa: N802 """Returns actions to the robot client. 
Actions are sent as a single chunk, containing multiple actions.""" client_id = context.peer() self.logger.debug(f"Client {client_id} connected for action streaming") # Generate action based on the most recent observation and its timestep try: getactions_starts = time.perf_counter() obs = self.observation_queue.get(timeout=self.config.obs_queue_timeout) self.logger.info( f"Running inference for observation #{obs.get_timestep()} (must_go: {obs.must_go})" ) with self._predicted_timesteps_lock: self._predicted_timesteps.add(obs.get_timestep()) start_time = time.perf_counter() action_chunk = self._predict_action_chunk(obs) inference_time = time.perf_counter() - start_time start_time = time.perf_counter() actions_bytes = pickle.dumps(action_chunk) # nosec serialize_time = time.perf_counter() - start_time # Create and return the action chunk actions = services_pb2.Actions(data=actions_bytes) self.logger.info( f"Action chunk #{obs.get_timestep()} generated | " f"Total time: {(inference_time + serialize_time) * 1000:.2f}ms" ) self.logger.debug( f"Action chunk #{obs.get_timestep()} generated | " f"Inference time: {inference_time:.2f}s |" f"Serialize time: {serialize_time:.2f}s |" f"Total time: {inference_time + serialize_time:.2f}s" ) time.sleep( max(0, self.config.inference_latency - max(0, time.perf_counter() - getactions_starts)) ) # sleep controls inference latency return actions except Empty: # no observation added to queue in obs_queue_timeout return services_pb2.Empty() except Exception as e: self.logger.error(f"Error in StreamActions: {e}") return services_pb2.Empty() def _obs_sanity_checks(self, obs: TimedObservation, previous_obs: TimedObservation) -> bool: """Check if the observation is valid to be processed by the policy""" with self._predicted_timesteps_lock: predicted_timesteps = self._predicted_timesteps if obs.get_timestep() in predicted_timesteps: self.logger.debug(f"Skipping observation #{obs.get_timestep()} - Timestep predicted already!") return False 
elif observations_similar(obs, previous_obs, lerobot_features=self.lerobot_features): self.logger.debug( f"Skipping observation #{obs.get_timestep()} - Observation too similar to last obs predicted!" ) return False else: return True def _enqueue_observation(self, obs: TimedObservation) -> bool: """Enqueue an observation if it must go through processing, otherwise skip it. Observations not in queue are never run through the policy network""" if ( obs.must_go or self.last_processed_obs is None or self._obs_sanity_checks(obs, self.last_processed_obs) ): last_obs = self.last_processed_obs.get_timestep() if self.last_processed_obs else "None" self.logger.debug( f"Enqueuing observation. Must go: {obs.must_go} | Last processed obs: {last_obs}" ) # If queue is full, get the old observation to make room if self.observation_queue.full(): # pops from queue _ = self.observation_queue.get_nowait() self.logger.debug("Observation queue was full, removed oldest observation") # Now put the new observation (never blocks as queue is non-full here) self.observation_queue.put(obs) return True return False def _time_action_chunk(self, t_0: float, action_chunk: list[torch.Tensor], i_0: int) -> list[TimedAction]: """Turn a chunk of actions into a list of TimedAction instances, with the first action corresponding to t_0 and the rest corresponding to t_0 + i*environment_dt for i in range(len(action_chunk)) """ return [ TimedAction(timestamp=t_0 + i * self.config.environment_dt, timestep=i_0 + i, action=action) for i, action in enumerate(action_chunk) ] def _prepare_observation(self, observation_t: TimedObservation) -> Observation: """ Prepare observation, ready for policy inference. E.g.: To keep observation sampling rate high (and network packet tiny) we send int8 [0,255] images from the client and then convert them to float32 [0,1] images here, before running inference. 
""" # RawObservation from robot.get_observation() - wrong keys, wrong dtype, wrong image shape observation: Observation = raw_observation_to_observation( observation_t.get_observation(), self.lerobot_features, self.policy_image_features, self.device, ) # processed Observation - right keys, right dtype, right image shape return observation def _get_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor: """Get an action chunk from the policy. The chunk contains only""" chunk = self.policy.predict_action_chunk(observation) if chunk.ndim != 3: chunk = chunk.unsqueeze(0) # adding batch dimension, now shape is (B, chunk_size, action_dim) return chunk[:, : self.actions_per_chunk, :] def _predict_action_chunk(self, observation_t: TimedObservation) -> list[TimedAction]: """Predict an action chunk based on an observation""" inference_starts = time.perf_counter() """1. Prepare observation""" start_time = time.perf_counter() observation = self._prepare_observation(observation_t) preprocessing_time = time.perf_counter() - start_time self.last_processed_obs: TimedObservation = observation_t """2. Get action chunk""" start_time = time.perf_counter() action_tensor = self._get_action_chunk(observation) inference_time = time.perf_counter() - start_time """3. 
Post-inference processing""" start_time = time.perf_counter() # Move to CPU before serializing action_tensor = action_tensor.cpu().squeeze(0) action_chunk = self._time_action_chunk( observation_t.get_timestamp(), list(action_tensor), observation_t.get_timestep() ) postprocessing_time = time.perf_counter() - start_time inference_stops = time.perf_counter() self.logger.info( f"Observation {observation_t.get_timestep()} |" f"Inference time: {1000 * (inference_stops - inference_starts):.2f}ms" ) # full-process latency breakdown for debugging purposes self.logger.debug( f"Observation {observation_t.get_timestep()} | " f"Preprocessing time: {1000 * (preprocessing_time - inference_starts):.2f}ms | " f"Inference time: {1000 * (inference_time - preprocessing_time):.2f}ms | " f"Postprocessing time: {1000 * (postprocessing_time - inference_time):.2f}ms | " f"Total time: {1000 * (postprocessing_time - inference_starts):.2f}ms" ) return action_chunk def stop(self): """Stop the server""" self._reset_server() self.logger.info("Server stopping...") @draccus.wrap() def serve(cfg: PolicyServerConfig): """Start the PolicyServer with the given configuration. Args: config: PolicyServerConfig instance. If None, uses default configuration. """ logging.info(pformat(asdict(cfg))) # Create the server instance first policy_server = PolicyServer(cfg) # Setup and start gRPC server server = grpc.server(futures.ThreadPoolExecutor(max_workers=4)) services_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server) server.add_insecure_port(f"{cfg.host}:{cfg.port}") policy_server.logger.info(f"PolicyServer started on {cfg.host}:{cfg.port}") server.start() server.wait_for_termination() policy_server.logger.info("Server terminated") if __name__ == "__main__": serve()
lerobot/src/lerobot/scripts/server/policy_server.py/0
{ "file_path": "lerobot/src/lerobot/scripts/server/policy_server.py", "repo_id": "lerobot", "token_count": 6373 }
218
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from enum import IntEnum
from typing import Any

import numpy as np

from ..teleoperator import Teleoperator
from .configuration_gamepad import GamepadTeleopConfig


class GripperAction(IntEnum):
    """Discrete gripper commands encoded as integers."""

    CLOSE = 0
    STAY = 1
    OPEN = 2


# Maps the string command returned by the gamepad driver to its integer encoding.
gripper_action_map = {
    "close": GripperAction.CLOSE.value,
    "open": GripperAction.OPEN.value,
    "stay": GripperAction.STAY.value,
}


class GamepadTeleop(Teleoperator):
    """Teleoperator that turns gamepad input into delta-motion actions,
    optionally including a discrete gripper command."""

    config_class = GamepadTeleopConfig
    name = "gamepad"

    def __init__(self, config: GamepadTeleopConfig):
        super().__init__(config)
        self.config = config
        self.robot_type = config.type
        self.gamepad = None

    @property
    def action_features(self) -> dict:
        """Describe the action vector: three deltas, plus a gripper slot if enabled."""
        names = {"delta_x": 0, "delta_y": 1, "delta_z": 2}
        if self.config.use_gripper:
            names["gripper"] = 3
        return {"dtype": "float32", "shape": (len(names),), "names": names}

    @property
    def feedback_features(self) -> dict:
        """No feedback channel exists for a gamepad."""
        return {}

    def connect(self) -> None:
        """Instantiate and start the platform-appropriate gamepad reader."""
        if sys.platform == "darwin":
            # NOTE: On macOS, pygame doesn't reliably detect input from some
            # controllers, so we fall back to the hidapi-based implementation.
            from .gamepad_utils import GamepadControllerHID as Gamepad
        else:
            from .gamepad_utils import GamepadController as Gamepad

        self.gamepad = Gamepad()
        self.gamepad.start()

    def get_action(self) -> dict[str, Any]:
        """Poll the gamepad and translate its current state into an action dict."""
        # Refresh controller state before reading it.
        self.gamepad.update()

        deltas = np.array(self.gamepad.get_deltas(), dtype=np.float32)
        action_dict = {
            name: deltas[idx] for idx, name in enumerate(("delta_x", "delta_y", "delta_z"))
        }

        if self.config.use_gripper:
            # Translate the driver's string command ("open"/"close"/"stay")
            # into its integer encoding; defaults handled by the driver itself.
            command = self.gamepad.gripper_command()
            action_dict["gripper"] = gripper_action_map[command]

        return action_dict

    def disconnect(self) -> None:
        """Stop the gamepad reader and drop the handle."""
        if self.gamepad is not None:
            self.gamepad.stop()
            self.gamepad = None

    def is_connected(self) -> bool:
        """A gamepad handle exists iff connect() succeeded and disconnect() wasn't called."""
        return self.gamepad is not None

    def calibrate(self) -> None:
        """No-op: a gamepad needs no calibration."""

    def is_calibrated(self) -> bool:
        """Always calibrated, since no calibration is required."""
        return True

    def configure(self) -> None:
        """No-op: no additional configuration is needed."""

    def send_feedback(self, feedback: dict) -> None:
        """No-op: gamepads have no feedback channel."""
lerobot/src/lerobot/teleoperators/gamepad/teleop_gamepad.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/gamepad/teleop_gamepad.py", "repo_id": "lerobot", "token_count": 1694 }
219
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import json
import logging
import pickle  # nosec B403: Safe usage for internal serialization only
from multiprocessing import Event
from queue import Queue
from typing import Any

import torch

from lerobot.transport import services_pb2
from lerobot.utils.transition import Transition

CHUNK_SIZE = 2 * 1024 * 1024  # 2 MB
MAX_MESSAGE_SIZE = 4 * 1024 * 1024  # 4 MB


def bytes_buffer_size(buffer: io.BytesIO) -> int:
    """Return the total number of bytes in `buffer`, leaving its cursor at 0."""
    buffer.seek(0, io.SEEK_END)
    result = buffer.tell()
    buffer.seek(0)
    return result


def send_bytes_in_chunks(buffer: bytes, message_class: Any, log_prefix: str = "", silent: bool = True):
    """Yield `message_class` messages that carry `buffer` in CHUNK_SIZE pieces.

    Each message is tagged with a TransferState (BEGIN / MIDDLE / END) so the
    receiver can reassemble the stream. Logs at DEBUG by default, INFO when
    `silent` is False.
    """
    buffer = io.BytesIO(buffer)
    size_in_bytes = bytes_buffer_size(buffer)

    sent_bytes = 0

    logging_method = logging.info if not silent else logging.debug

    # Fixed: message previously ended with a dangling "with".
    logging_method(f"{log_prefix} Buffer size {size_in_bytes / 1024 / 1024} MB")

    while sent_bytes < size_in_bytes:
        transfer_state = services_pb2.TransferState.TRANSFER_MIDDLE

        if sent_bytes + CHUNK_SIZE >= size_in_bytes:
            transfer_state = services_pb2.TransferState.TRANSFER_END
        elif sent_bytes == 0:
            transfer_state = services_pb2.TransferState.TRANSFER_BEGIN

        size_to_read = min(CHUNK_SIZE, size_in_bytes - sent_bytes)
        chunk = buffer.read(size_to_read)

        yield message_class(transfer_state=transfer_state, data=chunk)
        sent_bytes += size_to_read
        logging_method(f"{log_prefix} Sent {sent_bytes}/{size_in_bytes} bytes with state {transfer_state}")

    logging_method(f"{log_prefix} Published {sent_bytes / 1024 / 1024} MB")


def receive_bytes_in_chunks(iterator, queue: Queue | None, shutdown_event: Event, log_prefix: str = ""):
    """Reassemble chunked messages produced by `send_bytes_in_chunks`.

    If `queue` is provided, every completed payload is put on it and the loop
    keeps consuming the iterator; otherwise the first completed payload is
    returned directly. Returns None if the iterator is exhausted or
    `shutdown_event` is set before a payload completes.

    NOTE(review): `Event` here is `multiprocessing.Event`, a factory function
    rather than a class, so the annotation is informational only; callers in
    this repo pass `threading.Event` instances — confirm intended.

    Raises:
        ValueError: on an unknown transfer state.
    """
    bytes_buffer = io.BytesIO()
    step = 0

    logging.info(f"{log_prefix} Starting receiver")

    for item in iterator:
        logging.debug(f"{log_prefix} Received item")
        if shutdown_event.is_set():
            logging.info(f"{log_prefix} Shutting down receiver")
            return

        if item.transfer_state == services_pb2.TransferState.TRANSFER_BEGIN:
            # New payload: drop any partial data from a previous transfer.
            bytes_buffer.seek(0)
            bytes_buffer.truncate(0)
            bytes_buffer.write(item.data)
            logging.debug(f"{log_prefix} Received data at step 0")
            step = 0
        elif item.transfer_state == services_pb2.TransferState.TRANSFER_MIDDLE:
            bytes_buffer.write(item.data)
            step += 1
            logging.debug(f"{log_prefix} Received data at step {step}")
        elif item.transfer_state == services_pb2.TransferState.TRANSFER_END:
            bytes_buffer.write(item.data)
            logging.debug(f"{log_prefix} Received data at step end size {bytes_buffer_size(bytes_buffer)}")

            if queue is not None:
                queue.put(bytes_buffer.getvalue())
            else:
                return bytes_buffer.getvalue()

            # Reset the buffer for the next payload on this stream.
            bytes_buffer.seek(0)
            bytes_buffer.truncate(0)
            step = 0

            logging.debug(f"{log_prefix} Queue updated")
        else:
            logging.warning(f"{log_prefix} Received unknown transfer state {item.transfer_state}")
            raise ValueError(f"Received unknown transfer state {item.transfer_state}")


def state_to_bytes(state_dict: dict[str, torch.Tensor]) -> bytes:
    """Convert model state dict to flat array for transmission"""
    buffer = io.BytesIO()
    torch.save(state_dict, buffer)
    return buffer.getvalue()


def bytes_to_state_dict(buffer: bytes) -> dict[str, torch.Tensor]:
    """Inverse of `state_to_bytes`; loads with weights_only=True for safety."""
    buffer = io.BytesIO(buffer)
    buffer.seek(0)
    return torch.load(buffer, weights_only=True)


def python_object_to_bytes(python_object: Any) -> bytes:
    """Pickle an arbitrary (trusted, internal) Python object."""
    return pickle.dumps(python_object)


def bytes_to_python_object(buffer: bytes) -> Any:
    """Unpickle an internal payload produced by `python_object_to_bytes`."""
    buffer = io.BytesIO(buffer)
    buffer.seek(0)
    obj = pickle.load(buffer)  # nosec B301: Safe usage of pickle.load
    # Add validation checks here
    return obj


def bytes_to_transitions(buffer: bytes) -> list[Transition]:
    """Deserialize a list of Transitions produced by `transitions_to_bytes`."""
    buffer = io.BytesIO(buffer)
    buffer.seek(0)
    transitions = torch.load(buffer, weights_only=True)
    return transitions


def transitions_to_bytes(transitions: list[Transition]) -> bytes:
    """Serialize a list of Transitions with torch.save."""
    buffer = io.BytesIO()
    torch.save(transitions, buffer)
    return buffer.getvalue()


def grpc_channel_options(
    max_receive_message_length: int = MAX_MESSAGE_SIZE,
    max_send_message_length: int = MAX_MESSAGE_SIZE,
    enable_retries: bool = True,
    initial_backoff: str = "0.1s",
    max_attempts: int = 5,
    backoff_multiplier: float = 2,
    max_backoff: str = "2s",
):
    """Build the gRPC channel options list (message size limits + retry policy)."""
    service_config = {
        "methodConfig": [
            {
                "name": [{}],  # Applies to ALL methods in ALL services
                "retryPolicy": {
                    "maxAttempts": max_attempts,  # Max retries (total attempts = 5)
                    "initialBackoff": initial_backoff,  # First retry after 0.1s
                    "maxBackoff": max_backoff,  # Max wait time between retries
                    "backoffMultiplier": backoff_multiplier,  # Exponential backoff factor
                    "retryableStatusCodes": [
                        "UNAVAILABLE",
                        "DEADLINE_EXCEEDED",
                    ],  # Retries on network failures
                },
            }
        ]
    }

    service_config_json = json.dumps(service_config)

    retries_option = 1 if enable_retries else 0

    return [
        ("grpc.max_receive_message_length", max_receive_message_length),
        ("grpc.max_send_message_length", max_send_message_length),
        ("grpc.enable_retries", retries_option),
        ("grpc.service_config", service_config_json),
    ]
lerobot/src/lerobot/transport/utils.py/0
{ "file_path": "lerobot/src/lerobot/transport/utils.py", "repo_id": "lerobot", "token_count": 2592 }
220
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Any

import numpy as np
import rerun as rr


def _init_rerun(session_name: str = "lerobot_control_loop") -> None:
    """Initializes the Rerun SDK for visualizing the control loop.

    Honors RERUN_FLUSH_NUM_BYTES (default "8000") and
    LEROBOT_RERUN_MEMORY_LIMIT (default "10%") environment variables.
    """
    # Read the env var (or the default) and write it back so rerun picks it up.
    batch_size = os.getenv("RERUN_FLUSH_NUM_BYTES", "8000")
    os.environ["RERUN_FLUSH_NUM_BYTES"] = batch_size
    rr.init(session_name)
    memory_limit = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "10%")
    rr.spawn(memory_limit=memory_limit)


# Fixed: annotations were `dict[str | Any]` (a union), not the intended
# `dict[str, Any]` mapping type.
def log_rerun_data(observation: dict[str, Any], action: dict[str, Any]) -> None:
    """Log one control-loop step to Rerun.

    Float values are logged as scalars; 1-D arrays as one scalar per element;
    higher-dimensional observation arrays as images. Other value types are
    silently skipped.
    """
    for obs, val in observation.items():
        if isinstance(val, float):
            rr.log(f"observation.{obs}", rr.Scalar(val))
        elif isinstance(val, np.ndarray):
            if val.ndim == 1:
                for i, v in enumerate(val):
                    rr.log(f"observation.{obs}_{i}", rr.Scalar(float(v)))
            else:
                # NOTE(review): static=True marks the image as timeless in the
                # viewer, which seems unusual for a per-step camera frame — confirm.
                rr.log(f"observation.{obs}", rr.Image(val), static=True)

    for act, val in action.items():
        if isinstance(val, float):
            rr.log(f"action.{act}", rr.Scalar(val))
        elif isinstance(val, np.ndarray):
            # NOTE(review): unlike observations, multi-dim action arrays are
            # enumerated row-by-row here — assumes actions are 1-D; confirm.
            for i, v in enumerate(val):
                rr.log(f"action.{act}_{i}", rr.Scalar(float(v)))
lerobot/src/lerobot/utils/visualization_utils.py/0
{ "file_path": "lerobot/src/lerobot/utils/visualization_utils.py", "repo_id": "lerobot", "token_count": 759 }
221
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from uuid import uuid4

import numpy as np
import pytest
import torch

from lerobot.datasets.online_buffer import OnlineBuffer, compute_sampler_weights

# Some constants for OnlineBuffer tests.
data_key = "data"
data_shape = (2, 3)  # just some arbitrary > 1D shape
buffer_capacity = 100
fps = 10


def make_new_buffer(
    write_dir: str | None = None, delta_timestamps: dict[str, list[float]] | None = None
) -> tuple[OnlineBuffer, str]:
    """Create a fresh OnlineBuffer (backed by a unique temp dir unless one is provided)."""
    if write_dir is None:
        write_dir = f"/tmp/online_buffer_{uuid4().hex}"
    buffer = OnlineBuffer(
        write_dir,
        data_spec={data_key: {"shape": data_shape, "dtype": np.dtype("float32")}},
        buffer_capacity=buffer_capacity,
        fps=fps,
        delta_timestamps=delta_timestamps,
    )
    return buffer, write_dir


def make_spoof_data_frames(n_episodes: int, n_frames_per_episode: int) -> dict[str, np.ndarray]:
    """Build deterministic fake frames spanning `n_episodes` episodes of equal length."""
    new_data = {
        data_key: np.arange(n_frames_per_episode * n_episodes * np.prod(data_shape)).reshape(-1, *data_shape),
        OnlineBuffer.INDEX_KEY: np.arange(n_frames_per_episode * n_episodes),
        OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(np.arange(n_episodes), n_frames_per_episode),
        OnlineBuffer.FRAME_INDEX_KEY: np.tile(np.arange(n_frames_per_episode), n_episodes),
        OnlineBuffer.TIMESTAMP_KEY: np.tile(np.arange(n_frames_per_episode) / fps, n_episodes),
    }
    return new_data


def test_non_mutate():
    """Checks that the data provided to the add_data method is copied rather than passed by reference.

    This means that mutating the data in the buffer does not mutate the original data.

    NOTE: If this test fails, it means some of the other tests may be compromised. For example, we can't
    trust a success case for `test_write_read`.
    """
    buffer, _ = make_new_buffer()
    new_data = make_spoof_data_frames(2, buffer_capacity // 4)
    new_data_copy = deepcopy(new_data)
    buffer.add_data(new_data)
    # Mutate the buffer's internal storage; the caller's arrays must be unaffected.
    buffer._data[data_key][:] += 1
    assert all(np.array_equal(new_data[k], new_data_copy[k]) for k in new_data)


def test_index_error_no_data():
    buffer, _ = make_new_buffer()
    with pytest.raises(IndexError):
        buffer[0]


def test_index_error_with_data():
    buffer, _ = make_new_buffer()
    n_frames = buffer_capacity // 2
    new_data = make_spoof_data_frames(1, n_frames)
    buffer.add_data(new_data)
    # Both out-of-range positive and negative indices should raise.
    with pytest.raises(IndexError):
        buffer[n_frames]
    with pytest.raises(IndexError):
        buffer[-n_frames - 1]


@pytest.mark.parametrize("do_reload", [False, True])
def test_write_read(do_reload: bool):
    """Checks that data can be added to the buffer and read back.

    If do_reload we delete the buffer object and load the buffer back from disk before reading.
    """
    buffer, write_dir = make_new_buffer()
    n_episodes = 2
    n_frames_per_episode = buffer_capacity // 4
    new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode)
    buffer.add_data(new_data)

    if do_reload:
        del buffer
        buffer, _ = make_new_buffer(write_dir)

    assert len(buffer) == n_frames_per_episode * n_episodes
    for i, item in enumerate(buffer):
        assert all(isinstance(item[k], torch.Tensor) for k in item)
        assert np.array_equal(item[data_key].numpy(), new_data[data_key][i])


def test_read_data_key():
    """Tests that data can be added to a buffer and all data for a specific key can be read back."""
    buffer, _ = make_new_buffer()
    n_episodes = 2
    n_frames_per_episode = buffer_capacity // 4
    new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode)
    buffer.add_data(new_data)

    data_from_buffer = buffer.get_data_by_key(data_key)
    assert isinstance(data_from_buffer, torch.Tensor)
    assert np.array_equal(data_from_buffer.numpy(), new_data[data_key])


def test_fifo():
    """Checks that if data is added beyond the buffer capacity, we discard the oldest data first."""
    buffer, _ = make_new_buffer()
    n_frames_per_episode = buffer_capacity // 4
    n_episodes = 3
    new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode)
    buffer.add_data(new_data)
    n_more_episodes = 2
    # Developer sanity check (in case someone changes the global `buffer_capacity`).
    assert (n_episodes + n_more_episodes) * n_frames_per_episode > buffer_capacity, (
        "Something went wrong with the test code."
    )
    more_new_data = make_spoof_data_frames(n_more_episodes, n_frames_per_episode)
    buffer.add_data(more_new_data)
    assert len(buffer) == buffer_capacity, "The buffer should be full."

    expected_data = {}
    for k in new_data:
        # Concatenate, left-truncate, then roll, to imitate the cyclical FIFO pattern in OnlineBuffer.
        expected_data[k] = np.roll(
            np.concatenate([new_data[k], more_new_data[k]])[-buffer_capacity:],
            shift=len(new_data[k]) + len(more_new_data[k]) - buffer_capacity,
            axis=0,
        )

    for i, item in enumerate(buffer):
        assert all(isinstance(item[k], torch.Tensor) for k in item)
        assert np.array_equal(item[data_key].numpy(), expected_data[data_key][i])


def test_delta_timestamps_within_tolerance():
    """Check that getting an item with delta_timestamps within tolerance succeeds.

    Note: Copied from `test_datasets.py::test_load_previous_and_future_frames_within_tolerance`.
    """
    # Sanity check on global fps as we are assuming it is 10 here.
    assert fps == 10, "This test assumes fps==10"
    buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.2, 0, 0.139]})
    new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5)
    buffer.add_data(new_data)
    buffer.tolerance_s = 0.04
    item = buffer[2]
    data, is_pad = item["index"], item[f"index{OnlineBuffer.IS_PAD_POSTFIX}"]
    torch.testing.assert_close(data, torch.tensor([0, 2, 3]), msg="Data does not match expected values")
    assert not is_pad.any(), "Unexpected padding detected"


def test_delta_timestamps_outside_tolerance_inside_episode_range():
    """Check that getting an item with delta_timestamps outside of tolerance fails.

    We expect it to fail if and only if the requested timestamps are within the episode range.

    Note: Copied from
    `test_datasets.py::test_load_previous_and_future_frames_outside_tolerance_inside_episode_range`
    """
    # Sanity check on global fps as we are assuming it is 10 here.
    assert fps == 10, "This test assumes fps==10"
    buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.2, 0, 0.141]})
    new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5)
    buffer.add_data(new_data)
    buffer.tolerance_s = 0.04
    with pytest.raises(AssertionError):
        buffer[2]


def test_delta_timestamps_outside_tolerance_outside_episode_range():
    """Check that copy-padding of timestamps outside of the episode range works.

    Note: Copied from
    `test_datasets.py::test_load_previous_and_future_frames_outside_tolerance_outside_episode_range`
    """
    # Sanity check on global fps as we are assuming it is 10 here.
    assert fps == 10, "This test assumes fps==10"
    buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.3, -0.24, 0, 0.26, 0.3]})
    new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5)
    buffer.add_data(new_data)
    buffer.tolerance_s = 0.04
    item = buffer[2]
    data, is_pad = item["index"], item["index_is_pad"]
    assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), "Data does not match expected values"
    assert torch.equal(is_pad, torch.tensor([True, False, False, True, True])), (
        "Padding does not match expected values"
    )


# Arbitrarily set small dataset sizes, making sure to have uneven sizes.
@pytest.mark.parametrize("offline_dataset_size", [1, 6])
@pytest.mark.parametrize("online_dataset_size", [0, 4])
@pytest.mark.parametrize("online_sampling_ratio", [0.0, 1.0])
def test_compute_sampler_weights_trivial(
    lerobot_dataset_factory,
    tmp_path,
    offline_dataset_size: int,
    online_dataset_size: int,
    online_sampling_ratio: float,
):
    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=offline_dataset_size)
    online_dataset, _ = make_new_buffer()
    if online_dataset_size > 0:
        online_dataset.add_data(
            make_spoof_data_frames(n_episodes=2, n_frames_per_episode=online_dataset_size // 2)
        )

    weights = compute_sampler_weights(
        offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio
    )
    if offline_dataset_size == 0 or online_dataset_size == 0:
        expected_weights = torch.ones(offline_dataset_size + online_dataset_size)
    elif online_sampling_ratio == 0:
        expected_weights = torch.cat([torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)])
    elif online_sampling_ratio == 1:
        expected_weights = torch.cat([torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)])
    expected_weights /= expected_weights.sum()
    torch.testing.assert_close(weights, expected_weights)


def test_compute_sampler_weights_nontrivial_ratio(lerobot_dataset_factory, tmp_path):
    # Arbitrarily set small dataset sizes, making sure to have uneven sizes.
    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
    online_dataset, _ = make_new_buffer()
    online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
    online_sampling_ratio = 0.8
    weights = compute_sampler_weights(
        offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio
    )
    torch.testing.assert_close(
        weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    )


def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(lerobot_dataset_factory, tmp_path):
    # Arbitrarily set small dataset sizes, making sure to have uneven sizes.
    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=4)
    online_dataset, _ = make_new_buffer()
    online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))
    weights = compute_sampler_weights(
        offline_dataset, online_dataset=online_dataset, online_sampling_ratio=0.8, online_drop_n_last_frames=1
    )
    torch.testing.assert_close(
        weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0])
    )


def test_compute_sampler_weights_drop_n_last_frames(lerobot_dataset_factory, tmp_path):
    """Note: test copied from test_sampler."""
    offline_dataset = lerobot_dataset_factory(tmp_path, total_episodes=1, total_frames=2)
    online_dataset, _ = make_new_buffer()
    online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2))

    weights = compute_sampler_weights(
        offline_dataset,
        offline_drop_n_last_frames=1,
        online_dataset=online_dataset,
        online_sampling_ratio=0.5,
        online_drop_n_last_frames=1,
    )
    torch.testing.assert_close(weights, torch.tensor([0.5, 0, 0.125, 0, 0.125, 0, 0.125, 0, 0.125, 0]))
lerobot/tests/datasets/test_online_buffer.py/0
{ "file_path": "lerobot/tests/datasets/test_online_buffer.py", "repo_id": "lerobot", "token_count": 4677 }
222
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import threading
import time

import pytest
import torch
from torch.multiprocessing import Event, Queue

from lerobot.configs.train import TrainRLServerPipelineConfig
from lerobot.policies.sac.configuration_sac import SACConfig
from lerobot.utils.transition import Transition
from tests.utils import require_package


def create_test_transitions(count: int = 3) -> list[Transition]:
    """Create test transitions for integration testing."""
    transitions = []
    for i in range(count):
        transition = Transition(
            state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)},
            action=torch.randn(5),
            reward=torch.tensor(1.0 + i),
            done=torch.tensor(i == count - 1),  # Last transition is done
            truncated=torch.tensor(False),
            next_state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)},
            complementary_info={"step": torch.tensor(i), "episode_id": i // 2},
        )
        transitions.append(transition)
    return transitions


def create_test_interactions(count: int = 3) -> list[dict]:
    """Create test interactions for integration testing."""
    interactions = []
    for i in range(count):
        interaction = {
            "episode_reward": 10.0 + i * 5,
            "step": i * 100,
            "policy_fps": 30.0 + i,
            "intervention_rate": 0.1 * i,
            "episode_length": 200 + i * 50,
        }
        interactions.append(interaction)
    return interactions


def find_free_port():
    """Finds a free port on the local machine."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))  # Bind to port 0 to let the OS choose a free port
        s.listen(1)
        port = s.getsockname()[1]
        return port


@pytest.fixture
def cfg():
    """Build a training config wired for local (thread-based) actor-learner communication."""
    cfg = TrainRLServerPipelineConfig()
    port = find_free_port()
    policy_cfg = SACConfig()
    policy_cfg.actor_learner_config.learner_host = "127.0.0.1"
    policy_cfg.actor_learner_config.learner_port = port
    policy_cfg.concurrency.actor = "threads"
    policy_cfg.concurrency.learner = "threads"
    policy_cfg.actor_learner_config.queue_get_timeout = 0.1
    cfg.policy = policy_cfg
    return cfg


@require_package("grpc")
@pytest.mark.timeout(10)  # force cross-platform watchdog
def test_end_to_end_transitions_flow(cfg):
    """Test complete transitions flow from actor to learner."""
    # NOTE: the original placed this docstring after the imports below, which made it a dead
    # string expression rather than a docstring; it belongs at the top of the function.
    from lerobot.scripts.rl.actor import (
        establish_learner_connection,
        learner_service_client,
        push_transitions_to_transport_queue,
        send_transitions,
    )
    from lerobot.scripts.rl.learner import start_learner
    from lerobot.transport.utils import bytes_to_transitions
    from tests.transport.test_transport_utils import assert_transitions_equal

    transitions_actor_queue = Queue()
    transitions_learner_queue = Queue()
    interactions_queue = Queue()
    parameters_queue = Queue()
    shutdown_event = Event()

    learner_thread = threading.Thread(
        target=start_learner,
        args=(parameters_queue, transitions_learner_queue, interactions_queue, shutdown_event, cfg),
    )
    learner_thread.start()

    policy_cfg = cfg.policy
    learner_client, channel = learner_service_client(
        host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port
    )
    assert establish_learner_connection(learner_client, shutdown_event, attempts=5)

    send_transitions_thread = threading.Thread(
        target=send_transitions, args=(cfg, transitions_actor_queue, shutdown_event, learner_client, channel)
    )
    send_transitions_thread.start()

    input_transitions = create_test_transitions(count=5)
    push_transitions_to_transport_queue(input_transitions, transitions_actor_queue)

    # Wait for learner to start
    time.sleep(0.1)
    shutdown_event.set()

    # Wait for learner to receive transitions
    learner_thread.join()
    send_transitions_thread.join()
    channel.close()

    received_transitions = []
    while not transitions_learner_queue.empty():
        received_transitions.extend(bytes_to_transitions(transitions_learner_queue.get()))

    assert len(received_transitions) == len(input_transitions)
    for i, transition in enumerate(received_transitions):
        assert_transitions_equal(transition, input_transitions[i])


@require_package("grpc")
@pytest.mark.timeout(10)
def test_end_to_end_interactions_flow(cfg):
    """Test complete interactions flow from actor to learner."""
    from lerobot.scripts.rl.actor import (
        establish_learner_connection,
        learner_service_client,
        send_interactions,
    )
    from lerobot.scripts.rl.learner import start_learner
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    # Queues for actor-learner communication
    interactions_actor_queue = Queue()
    interactions_learner_queue = Queue()

    # Other queues required by the learner
    parameters_queue = Queue()
    transitions_learner_queue = Queue()
    shutdown_event = Event()

    # Start the learner in a separate thread
    learner_thread = threading.Thread(
        target=start_learner,
        args=(parameters_queue, transitions_learner_queue, interactions_learner_queue, shutdown_event, cfg),
    )
    learner_thread.start()

    # Establish connection from actor to learner
    policy_cfg = cfg.policy
    learner_client, channel = learner_service_client(
        host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port
    )
    assert establish_learner_connection(learner_client, shutdown_event, attempts=5)

    # Start the actor's interaction sending process in a separate thread
    send_interactions_thread = threading.Thread(
        target=send_interactions,
        args=(cfg, interactions_actor_queue, shutdown_event, learner_client, channel),
    )
    send_interactions_thread.start()

    # Create and push test interactions to the actor's queue
    input_interactions = create_test_interactions(count=5)
    for interaction in input_interactions:
        interactions_actor_queue.put(python_object_to_bytes(interaction))

    # Wait for the communication to happen
    time.sleep(0.1)

    # Signal shutdown and wait for threads to complete
    shutdown_event.set()
    learner_thread.join()
    send_interactions_thread.join()
    channel.close()

    # Verify that the learner received the interactions
    received_interactions = []
    while not interactions_learner_queue.empty():
        received_interactions.append(bytes_to_python_object(interactions_learner_queue.get()))

    assert len(received_interactions) == len(input_interactions)

    # Sort by a unique key to handle potential reordering in queues
    received_interactions.sort(key=lambda x: x["step"])
    input_interactions.sort(key=lambda x: x["step"])

    # Lengths were asserted equal above, so a length mismatch here would be a bug: use strict=True.
    for received, expected in zip(received_interactions, input_interactions, strict=True):
        assert received == expected


@require_package("grpc")
@pytest.mark.parametrize("data_size", ["small", "large"])
@pytest.mark.timeout(10)
def test_end_to_end_parameters_flow(cfg, data_size):
    """Test complete parameter flow from learner to actor, with small and large data."""
    from lerobot.scripts.rl.actor import establish_learner_connection, learner_service_client, receive_policy
    from lerobot.scripts.rl.learner import start_learner
    from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes

    # Actor's local queue to receive params
    parameters_actor_queue = Queue()
    # Learner's queue to send params from
    parameters_learner_queue = Queue()

    # Other queues required by the learner
    transitions_learner_queue = Queue()
    interactions_learner_queue = Queue()
    shutdown_event = Event()

    # Start the learner in a separate thread
    learner_thread = threading.Thread(
        target=start_learner,
        args=(
            parameters_learner_queue,
            transitions_learner_queue,
            interactions_learner_queue,
            shutdown_event,
            cfg,
        ),
    )
    learner_thread.start()

    # Establish connection from actor to learner
    policy_cfg = cfg.policy
    learner_client, channel = learner_service_client(
        host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port
    )
    assert establish_learner_connection(learner_client, shutdown_event, attempts=5)

    # Start the actor's parameter receiving process in a separate thread
    receive_params_thread = threading.Thread(
        target=receive_policy,
        args=(cfg, parameters_actor_queue, shutdown_event, learner_client, channel),
    )
    receive_params_thread.start()

    # Create test parameters based on parametrization
    if data_size == "small":
        input_params = {"layer.weight": torch.randn(128, 64)}
    else:  # "large"
        # CHUNK_SIZE is 2MB, so this tensor (4MB) will force chunking
        input_params = {"large_layer.weight": torch.randn(1024, 1024)}

    # Simulate learner having new parameters to send
    parameters_learner_queue.put(state_to_bytes(input_params))

    # Wait for the actor to receive the parameters
    time.sleep(0.1)

    # Signal shutdown and wait for threads to complete
    shutdown_event.set()
    learner_thread.join()
    receive_params_thread.join()
    channel.close()

    # Verify that the actor received the parameters correctly
    received_params = bytes_to_state_dict(parameters_actor_queue.get())
    assert received_params.keys() == input_params.keys()
    for key in input_params:
        assert torch.allclose(received_params[key], input_params[key])
lerobot/tests/rl/test_actor_learner.py/0
{ "file_path": "lerobot/tests/rl/test_actor_learner.py", "repo_id": "lerobot", "token_count": 3738 }
223
# Open R1 *A fully open reproduction of DeepSeek-R1. This repo is a work in progress, let's build it together!* **Table of Contents** 1. [Overview](#overview) 2. [Plan of attack](#plan-of-attack) 3. [Installation](#installation) 4. [Training models](#training-models) - [SFT](#sft) - [GRPO](#grpo) 5. [Evaluating models](#evaluating-models) 6. [Reproducing Deepseek's evaluation results](#reproducing-deepseeks-evaluation-results) 7. [Data generation](#data-generation) - [Generate data from a smol distilled R1 model](#generate-data-from-a-smol-distilled-r1-model) - [Generate data from DeepSeek-R1](#generate-data-from-deepseek-r1) 8. [Contributing](#contributing) ## Overview The goal of this repo is to build the missing pieces of the R1 pipeline such that everybody can reproduce and build on top of it. The project is simple by design and mostly consists of: - `src/open_r1`: contains the scripts to train models as well as generate synthetic data: - `grpo.py`: trains a model with GRPO on a given dataset. - `sft.py`: performs a simple SFT of a model on a dataset. - `generate.py`: generates synthetic data from a model using [Distilabel](https://github.com/argilla-io/distilabel). - `Makefile`: contains easy-to-run commands for each step in the R1 pipeline leveraging the scripts above. ### Plan of attack We will use the DeepSeek-R1 [tech report](https://github.com/deepseek-ai/DeepSeek-R1) as a guide, which can roughly be broken down into three main steps: * Step 1: replicate the R1-Distill models by distilling a high-quality corpus from DeepSeek-R1. * Step 2: replicate the pure RL pipeline that DeepSeek used to create R1-Zero. This will likely involve curating new, large-scale datasets for math, reasoning, and code. * Step 3: show we can go from base model to RL-tuned via multi-stage training. 
<center> <img src="assets/plan-of-attack.png" width="500"> </center> ## News ๐Ÿ—ž๏ธ * **๐Ÿง‘โ€๐Ÿณ [2025/05/26] (Step 1 completed!)** We release [**Mixture-of-Thoughts**](https://huggingface.co/datasets/open-r1/Mixture-of-Thoughts)--a curated reasoning dataset of 350k verified traces distilled from R1. The dataset spans tasks in mathematics, coding, and science, and is designed to teach language models to reason step-by-step. We also provide a recipe to train [OpenR1-Distill-7B](https://huggingface.co/open-r1/OpenR1-Distill-7B), which replicates the reasoning capabilities of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) and marks the completion of step 1 in the Open R1 project. * **โšก๏ธ [2025/03/11] [(update #3)](https://huggingface.co/blog/open-r1/update-3):** We release the [**CodeForces-CoTs**](https://huggingface.co/datasets/open-r1/codeforces-cots) dataset of 10k competitive programming problems and 100k solutions distilled from R1. We also release IOI24: a new benchmark of _very_ hard problems from international olympiads. A 7B Qwen model trained on CodeForces-CoTs can outperform Claude 3.7 Sonnet on IOI24, while a 32B model can outperform R1 itself. * **โˆž [2025/02/10] [(update #2)](https://huggingface.co/blog/open-r1/update-2):** We release the [**OpenR1-Math-220k**](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) dataset of 220k traces distilled from R1 on a new version of NuminaMath. Models trained on this dataset match the performance of DeepSeek's distilled ones. * **๐Ÿ”ฅ [2025/02/02] [(update #1)](https://huggingface.co/blog/open-r1/update-1):** We implement the first parts of the [training](https://github.com/huggingface/open-r1?tab=readme-ov-file#training-models), [inference](https://github.com/huggingface/open-r1?tab=readme-ov-file#data-generation), and [evaluation](https://github.com/huggingface/open-r1?tab=readme-ov-file#reproducing-deepseeks-evaluation-results) pipelines. 
Let's go! ## Installation > [!CAUTION] > Libraries rely on CUDA 12.4. If you see errors related to segmentation faults, double check the version your system is running with `nvcc --version`. To run the code in this project, first, create a Python virtual environment using e.g. `uv`. To install `uv`, follow the [UV Installation Guide](https://docs.astral.sh/uv/getting-started/installation/). > [!NOTE] > As a shortcut, run `make install` to setup development libraries (spelled out below). Afterwards, if everything is setup correctly you can try out the Open-R1 models. ```shell uv venv openr1 --python 3.11 && source openr1/bin/activate && uv pip install --upgrade pip ``` > [!TIP] > For Hugging Face cluster users, add `export UV_LINK_MODE=copy` to your `.bashrc` to suppress cache warnings from `uv` Next, install vLLM and FlashAttention: ```shell uv pip install vllm==0.8.5.post1 uv pip install setuptools && uv pip install flash-attn --no-build-isolation ``` This will also install PyTorch `v2.6.0` and it is **very important** to use this version since the vLLM binaries are compiled for it. You can then install the remaining dependencies for your specific use case via `pip install -e .[LIST OF MODES]`. For most contributors, we recommend: ```shell GIT_LFS_SKIP_SMUDGE=1 uv pip install -e ".[dev]" ``` Next, log into your Hugging Face and Weights and Biases accounts as follows: ```shell huggingface-cli login wandb login ``` Finally, check whether your system has Git LFS installed so that you can load and push models/datasets to the Hugging Face Hub: ```shell git-lfs --version ``` If it isn't installed, run: ```shell sudo apt-get install git-lfs ``` ## Training models > [!NOTE] > The training commands below are configured for a node of 8 x H100s (80GB). For different hardware and topologies, you may need to tune the batch size and number of gradient accumulation steps. We support training models with either DDP or DeepSpeed (ZeRO-2 and ZeRO-3). 
For example, to perform SFT on a dataset distilled from DeepSeek-R1 with reasoning traces such as [open-r1/Mixture-of-Thoughts](https://huggingface.co/datasets/open-r1/Mixture-of-Thoughts), run: ```shell # Train via command line accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \ --model_name_or_path open-r1/Qwen2.5-Math-7B-RoPE-300k \ --dataset_name open-r1/Mixture-of-Thoughts \ --dataset_config all \ --eos_token '<|im_end|>' \ --learning_rate 4.0e-5 \ --num_train_epochs 5 \ --max_seq_length 32768 \ --per_device_train_batch_size 2 \ --gradient_checkpointing \ --bf16 \ --use_liger_kernel \ --output_dir data/OpenR1-Distill-7B # Train via YAML config accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml ``` Currently, the following tasks are supported: * Supervised Fine-Tuning `sft` * Group Relative Policy Optimization `grpo` > [!TIP] > If you scale up/down the number of GPUs, we recommend also scaling up the per-device batch size or number of gradient accumulation steps to keep the global batch size constant. By default, these scripts will push each model to your Hugging Face Hub username, i.e. `{username}/{model_name}-{task}`. 
You can override the parameters in each YAML config by appending them to the command as follows: ```shell # Change the base model to a smaller variant accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml \ --model_name_or_path Qwen/Qwen3-0.6B-Base \ --hub_model_id OpenR1-Distill-0.6B \ --output_dir data/OpenR1-Distill-0.6B ``` If you also wish to override the Weights and Biases default settings, you can do so as follows: ```shell accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml --wandb_entity huggingface --wandb_project open-r1 --run_name Qwen2.5-1.5B-GRPO ``` **๐Ÿšจ WARNING ๐Ÿšจ** Most base models like `meta-llama/Llama-3.2-1B` do not have a chat template, so we set ChatML as the default during training. However, for Qwen base models like `Qwen/Qwen2.5-1.5B`, a chat template is pre-defined in the tokenizer, so the EOS token must be set accordingly, e.g. ```diff # Align EOS token with chat template for Qwen base models accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \ --model_name_or_path Qwen/Qwen2.5-1.5B \ + --eos_token '<|im_end|>' --dataset_name open-r1/Mixture-of-Thoughts \ --dataset_config all \ --learning_rate 4.0e-5 \ --num_train_epochs 1 \ --max_seq_length 32768 \ --per_device_train_batch_size 16 \ --gradient_checkpointing \ --bf16 \ --use_liger_kernel \ --output_dir data/Qwen2.5-1.5B-Open-R1-Distill ``` If you wish to use a custom chat template (e.g. 
Llama or Gemma), then the chat template and associated EOS token must be provided: ```diff # Align EOS token with custom chat template accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \ --model_name_or_path meta-llama/Llama-3.2-1B \ + --chat_template "$(cat llama_chat_template.jinja)" \ + --eos_token '<|eot_id|>' \ --dataset_name open-r1/Mixture-of-Thoughts \ --dataset_config all \ --learning_rate 4.0e-5 \ --num_train_epochs 1 \ --max_seq_length 32768 \ --per_device_train_batch_size 16 \ --gradient_checkpointing \ --bf16 \ --use_liger_kernel \ --output_dir data/Llama-3.2-1B-Open-R1-Distill ``` ### SFT distillation We provide a recipe to reproduce the reasoning capabilities of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B), starting from the same base model. To do so, run: ```shell ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero3.yaml \ src/open_r1/sft.py \ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml ``` The result will be a model like [open-r1/OpenR1-Distill-7B](https://huggingface.co/open-r1/OpenR1-Distill-7B), with the following downstream performance: | Model | AIME 2024 | MATH-500 | GPQA Diamond | LiveCodeBench v5 | |-----------------------------|-----------|----------|--------------|------------------| | OpenR1-Distill-7B | 52.7 | 89.0 | 52.8 | 39.4 | | DeepSeek-R1-Distill-Qwen-7B | 51.3 | 93.5 | 52.4 | 37.4 | You can adjust the YAML config to train on a different base model or dataset. ### GRPO We use TRL's [vLLM backend](https://huggingface.co/docs/trl/speeding_up_training?vllm+examples=GRPO#vllm-for-fast-generation-in-online-methods) to scale training to large models across multiple nodes. 
For single-node training of smol models across 8 GPUs, use `vllm_mode="colocate"` to run vLLM in the same process as the training script:

```shell
ACCELERATE_LOG_LEVEL=info \
    accelerate launch --config_file recipes/accelerate_configs/zero3.yaml \
    src/open_r1/grpo.py --config recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml \
    --vllm_mode colocate
```

> [!WARNING]
> The chat template used in the distilled DeepSeek models omits the contents of the reasoning block within the `<think>` and `</think>` tags. It also prefills the assistant response with `<think>` which interferes with the format reward function. To handle that, it is important to override the chat template as done in e.g. [recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml](./recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml).

For multi-node training on N+1 nodes, with 1 node running the vLLM server and N nodes running training, we provide an example Slurm script. For example, to run the above example on 1+1 nodes with data parallelism, run:

```shell
sbatch --nodes=2 slurm/train.slurm --model Qwen2.5-1.5B-Instruct --task grpo --config demo --accelerator zero2 --dp 8 --tp 1
```

See the [Launching jobs on a Slurm cluster](#launching-jobs-on-a-slurm-cluster) section for more details.

#### GRPO dataset filtering

We provide support to filter datasets by generating and computing the pass rate on verifiable tasks; see this [README](scripts/pass_rate_filtering/README.md)

#### 👨‍💻 Training with a code interpreter

We provide a `code` reward function for executing code generated by the policy during training. Currently, this reward function targets code contests like [Codeforces](https://codeforces.com), where solutions are executed against a set of test cases and the overall success rate is returned as the final reward. To ensure safe execution, we support multiple sandbox providers:

1. [E2B](https://e2b.dev) - Fast, cloud-based sandboxes with focus on Python execution
2. 
[Morph](https://cloud.morph.so/web/) - Cloud-based sandboxes with broader language support - Python/JS/C++/Rust To use the code reward function, first install the necessary dependencies: ```shell uv pip install -e '.[code]' ``` ##### E2B Provider To use E2B sandboxes, create a `.env` file and add your E2B API token: ``` E2B_API_KEY="e2b_xxx" ``` ##### Morph Provider To use Morph, first install the morphcloud package: ```shell pip install morphcloud ``` Then add your Morph API token to the `.env` file: ``` MORPH_API_KEY="YOUR_MORPH_API_KEY" ``` To specify which provider to use, add the `provider_type` parameter in your configuration: ```yaml # For E2B provider_type: e2b # For Morph provider_type: morph ``` ##### Dataset Requirements Make sure your dataset contains a `verification_info` column with the following schema (adopted from PrimeIntellect's excellent [datasets](https://huggingface.co/collections/PrimeIntellect/synthetic-1-67a2c399cfdd6c9f7fae0c37) of verifiable problems): ```python { "language": "python", # Morph supports more languages including C++, Java, etc. "test_cases": [ { "input": "4\n4\n0001\n1000\n0011\n0111\n3\n010\n101\n0\n2\n00000\n00001\n4\n01\n001\n0001\n00001\n", "output": "1\n3 \n-1\n0\n\n2\n1 2 \n", "type": "stdin_stdout", } ], } ``` For example, to train a smol model on Python problems, start the vLLM server: ```shell CUDA_VISIBLE_DEVICES=0 trl vllm-serve --model Qwen/Qwen2.5-1.5B-Instruct ``` Then run training with: ```shell CUDA_VISIBLE_DEVICES=1,2,3,4,5,6,7 ACCELERATE_LOG_LEVEL=info \ accelerate launch --config_file recipes/accelerate_configs/zero2.yaml --num_processes=7 \ src/open_r1/grpo.py --config recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code.yaml ``` ##### Using Router Services It is possible to be rate limited when too many scripts are executed on sandbox services. 
For both providers, we offer router scripts that can be launched on a CPU node: For E2B: ```shell sbatch slurm/e2b_router.slurm ``` For Morph: ```shell sbatch slurm/morph_router.slurm ``` Then add the router URL in your training YAML config: ```yaml # For E2B e2b_router_url: 1.2.3.4:8000 # For Morph morph_router_url: 1.2.3.4:8000 ``` The port should match the one used when launching the router. All training jobs can share the same router IP which will ensure parallel executions are properly managed. #### Competitive Programming problems: IOI & CodeForces We provide `ioi_code_reward` and `cf_code_reward` reward functions for executing problems from [IOI](https://hf.co/datasets/open-r1/ioi) and [CodeForces](https://huggingface.co/datasets/open-r1/codeforces), respectively. You can use either [piston](https://github.com/engineer-man/piston) or Morph (currently IOI only) as your execution provider. ##### Piston To use Piston: 1. Get piston workers running, see [slurm/piston/README.md](./slurm/piston/README.md) 2. Set your environment variable `PISTON_ENDPOINTS` to `slurm` or to a list of piston worker endpoints For IOI: 3. In your configuration, use `ioi_provider: "piston"` For CodeForces: 3. Download the generated (hard) test cases: ``` # change PATH_TO_SAVE_TESTCASES. Increase --max-workers according to your machine's capacity huggingface-cli download open-r1/codeforces --repo-type=dataset --include='generated_tests/*.parquet' --max-workers=8 --local-dir PATH_TO_SAVE_TESTCASES ``` 4. Save the path in .env: ``` CF_TESTS_FOLDER=PATH_TO_SAVE_TESTCASES ``` ##### Morph Morph is a cloud-based solution that provides sandboxed environments for running code. To use it: 1. Install the Morph client: `pip install morphcloud` 2. Add your Morph API key to the `.env` file: `MORPH_API_KEY="your_key_here"` 3. 
In your configuration, use `ioi_provider: "morph"` ##### Example recipes For IOI: See the [example recipe](./recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml) for how to use the IOI reward function: ```shell ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero2.yaml \ --num_processes=7 src/open_r1/grpo.py \ --config recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml ``` For CodeForces: ```shell sbatch --job-name=cf-grpo --nodes=2 slurm/train.slurm --model Qwen2.5-Coder-7B-Instruct --task grpo --config codeforces --accelerator zero3 --dp 8 --tp 1 ``` ### Launching jobs on a Slurm cluster If you have access to a Slurm cluster, we provide a `slurm/train.slurm` script that will automatically queue training jobs for you. Here's how you can use it: ```shell sbatch --job-name=open_r1 --nodes=1 slurm/train.slurm --model {model_name} --task {task} --config {config_suffix} --accelerator {accelerator} ``` Here `{model_name}` and `{task}` are defined as above, while `{config_suffix}` refers to the specific config and `{accelerator}` refers to the choice of ๐Ÿค— Accelerate config in `recipes/accelerate_configs`. If you wish to override the default config parameters, you can provide them by appending a space-separated string like `'--arg1=value1 --arg2=value2'`. Here's a concrete example to run SFT on 1 node of 8 GPUs: ```shell sbatch --job-name=open_r1 --nodes=1 slurm/train.slurm --model OpenR1-Distill-7B --task sft --config distill --accelerator zero3 ``` You can scale the number of nodes by increasing the `--nodes` flag. For GRPO, we use 1 node for the vLLM server and N nodes for training. 
For example, to run GRPO on 1+1 nodes with mixed data and tensor parallelism, run: ```shell sbatch --job-name=open_r1 --nodes=2 slurm/train.slurm --model Qwen2.5-1.5B-Instruct --task grpo --config demo --accelerator zero2 --dp 4 --tp 2 ``` > [!NOTE] > The configuration in `slurm/train.slurm` is optimised for the Hugging Face Compute Cluster and may require tweaking to be adapted to your own compute nodes. ### Customising the dataset mixture To combine multiple datasets as a single training mixture, you can specify the `dataset_mixture` parameter in the YAML config file. Here's a template for how to do this: ```yaml dataset_mixture: datasets: # List of datasets to include in the mixture - id: dataset_1 # Hub dataset ID config: config_name_1 # Name of the dataset config split: split_1 # Split to use from the dataset columns: # Columns to keep - column_1 - column_2 weight: 0.25 # Fraction of dataset to use - id: dataset_2 config: config_name_2 split: split_2 columns: - column_1 - column_2 weight: 0.5 seed: 42 # Seed for shuffling the combined dataset test_split_size: 0.1 # Fraction of mixture to use for a test split ``` ## Evaluating models We use `lighteval` to evaluate models. 
For models which fit on a single GPU, run: ```shell export VLLM_WORKER_MULTIPROC_METHOD=spawn # Required for vLLM MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" OUTPUT_DIR=data/evals/$MODEL # AIME 2024 TASK=aime24 lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR # MATH-500 TASK=math_500 lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR # GPQA Diamond TASK=gpqa:diamond lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR # LiveCodeBench lighteval vllm $MODEL_ARGS "extended|lcb:codegeneration|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` To increase throughput across multiple GPUs, use _data parallel_ as follows: ```shell NUM_GPUS=8 MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" TASK=aime24 OUTPUT_DIR=data/evals/$MODEL lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` For large models which require sharding across GPUs, use _tensor parallel_ and run: ```shell NUM_GPUS=8 MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" TASK=aime24 OUTPUT_DIR=data/evals/$MODEL export VLLM_WORKER_MULTIPROC_METHOD=spawn lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` You can also launch an evaluation with `make evaluate`, specifying the model, task, and optionally the parallelism 
technique and number of GPUs. To evaluate on a single GPU: ```shell make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 ``` To use Data Parallelism: ```shell make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=data NUM_GPUS=8 ``` To use Tensor Parallelism: ```shell make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=tensor NUM_GPUS=8 ``` ## Reproducing Deepseek's evaluation results The DeepSeek-R1 paper uses sampling with 4-64 responses per query to estimate `pass@1` accuracy, but does not specify the specific number of responses per benchmark. In the tables below, we estimate `pass@1` accuracy with the following number of responses per query: | Benchmark | Number of responses per query | |:-------------:|:-----------------------------:| | AIME 2024 | 64 | | MATH-500 | 4 | | GPQA Diamond | 8 | | LiveCodeBench | 16 | Note that for benchmarks like AIME24, it is important to sample many responses as there are only 30 problems and this can introduce high variance across repeated runs. The choice of how many responses to sample per prompt likely explains the small differences between our evaluation results and those reported by DeepSeek. 
### AIME 2024 We are able to reproduce Deepseek's reported results on the AIME 2024 benchmark within ~1-3 standard deviations: | Model | AIME 2024 (๐Ÿค— LightEval) | AIME 2024 (DeepSeek Reported) | |:------------------------------|:------------------------:|:-----------------------------:| | DeepSeek-R1-Distill-Qwen-1.5B | 30.7 | 28.9 | | DeepSeek-R1-Distill-Qwen-7B | 50.8 | 55.5 | | DeepSeek-R1-Distill-Qwen-14B | 65.9 | 69.7 | | DeepSeek-R1-Distill-Qwen-32B | 69.7 | 72.6 | | DeepSeek-R1-Distill-Llama-8B | 43.9 | 41.7 | | DeepSeek-R1-Distill-Llama-70B | 63.0 | 70.0 | To reproduce these results use the following command: ```shell NUM_GPUS=1 # Set to 8 for 32B and 70B models MODEL=deepseek-ai/{model_name} MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,data_parallel_size=$NUM_GPUS,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" OUTPUT_DIR=data/evals/$MODEL lighteval vllm $MODEL_ARGS "lighteval|aime24|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` Alternatively, you can launch Slurm jobs as follows: ```shell python scripts/run_benchmarks.py --model-id {model_id} --benchmarks aime24 ``` ### MATH-500 We are able to reproduce Deepseek's reported results on the MATH-500 benchmark within ~1-3 standard deviations: | Model | MATH-500 (๐Ÿค— LightEval) | MATH-500 (DeepSeek Reported) | |:------------------------------|:-----------------------:|:----------------------------:| | DeepSeek-R1-Distill-Qwen-1.5B | 83.1 | 83.9 | | DeepSeek-R1-Distill-Qwen-7B | 94.5 | 92.8 | | DeepSeek-R1-Distill-Qwen-14B | 94.1 | 93.9 | | DeepSeek-R1-Distill-Qwen-32B | 95.6 | 94.3 | | DeepSeek-R1-Distill-Llama-8B | 88.6 | 89.1 | | DeepSeek-R1-Distill-Llama-70B | 95.1 | 94.5 | To reproduce these results use the following command: ```shell export VLLM_WORKER_MULTIPROC_METHOD=spawn NUM_GPUS=1 # Set to 8 for 32B and 70B models MODEL=deepseek-ai/{model_name} 
MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,data_parallel_size=$NUM_GPUS,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" OUTPUT_DIR=data/evals/$MODEL lighteval vllm $MODEL_ARGS "lighteval|math_500|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` Alternatively, you can launch Slurm jobs as follows: ```shell python scripts/run_benchmarks.py --model-id {model_id} --benchmarks math_500 ``` ### GPQA Diamond We are able to reproduce Deepseek's reported results on the GPQA Diamond benchmark within ~1-3 standard deviations: | Model | GPQA Diamond (๐Ÿค— LightEval) | GPQA Diamond (DeepSeek Reported) | |:------------------------------|:---------------------------:|:--------------------------------:| | DeepSeek-R1-Distill-Qwen-1.5B | 35.8 | 33.8 | | DeepSeek-R1-Distill-Qwen-7B | 50.5 | 49.1 | | DeepSeek-R1-Distill-Qwen-14B | 61.5 | 59.1 | | DeepSeek-R1-Distill-Qwen-32B | 63.1 | 62.1 | | DeepSeek-R1-Distill-Llama-8B | 46.7 | 49.0 | | DeepSeek-R1-Distill-Llama-70B | 67.4 | 65.2 | To reproduce these results use the following command: ```shell export VLLM_WORKER_MULTIPROC_METHOD=spawn NUM_GPUS=1 # Set to 8 for 32B and 70B models MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" OUTPUT_DIR=data/evals/$MODEL lighteval vllm $MODEL_ARGS "lighteval|gpqa:diamond|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` ```shell python scripts/run_benchmarks.py --model-id {model_id} --benchmarks gpqa ``` ### LiveCodeBench We are able to reproduce Deepseek's reported results on the LiveCodeBench code generation benchmark within ~1-3 standard deviations: | Model | LiveCodeBench (๐Ÿค— LightEval) | LiveCodeBench (DeepSeek Reported) | |:------------------------------|:----------------------------:|:---------------------------------:| | 
DeepSeek-R1-Distill-Qwen-1.5B | 16.1 | 16.9 | | DeepSeek-R1-Distill-Qwen-7B | 37.4 | 37.6 | | DeepSeek-R1-Distill-Qwen-14B | 51.3 | 53.1 | | DeepSeek-R1-Distill-Qwen-32B | 56.0 | 57.2 | | DeepSeek-R1-Distill-Llama-8B | 37.4 | 39.6 | | DeepSeek-R1-Distill-Llama-70B | 55.9 | 57.5 | To reproduce these results use the following command: ```shell NUM_GPUS=1 # Set to 8 for 32B and 70B models, or data_parallel_size=8 with the smaller models for speed MODEL=deepseek-ai/{model_name} MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,data_parallel_size=$NUM_GPUS,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" OUTPUT_DIR=data/evals/$MODEL lighteval vllm $MODEL_ARGS "extended|lcb:codegeneration|0|0" \ --use-chat-template \ --output-dir $OUTPUT_DIR ``` ```shell python scripts/run_benchmarks.py --model-id {model_id} --benchmarks lcb ``` ## Data generation ### Generate data from a smol distilled R1 model The following example can be run in 1xH100. First install the following dependencies: ```shell uv pip install "distilabel[vllm]>=1.5.2" ``` Now save the following snippet into a file named `pipeline.py` and run it with `python pipeline.py`. It will generate 4 outputs for each of the 10 examples (change the username for the repository to your org/user name): ```python from datasets import load_dataset from distilabel.models import vLLM from distilabel.pipeline import Pipeline from distilabel.steps.tasks import TextGeneration prompt_template = """\ You will be given a problem. 
Please reason step by step, and put your final answer within \boxed{}: {{ instruction }}""" dataset = load_dataset("AI-MO/NuminaMath-TIR", split="train").select(range(10)) model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B" # Exchange with another smol distilled r1 with Pipeline( name="distill-qwen-7b-r1", description="A pipeline to generate data from a distilled r1 model", ) as pipeline: llm = vLLM( model=model_id, tokenizer=model_id, extra_kwargs={ "tensor_parallel_size": 1, "max_model_len": 8192, }, generation_kwargs={ "temperature": 0.6, "max_new_tokens": 8192, }, ) prompt_column = "problem" text_generation = TextGeneration( llm=llm, template=prompt_template, num_generations=4, input_mappings={"instruction": prompt_column} if prompt_column is not None else {} ) if __name__ == "__main__": distiset = pipeline.run(dataset=dataset) distiset.push_to_hub(repo_id="username/numina-deepseek-r1-qwen-7b") ``` Take a look at the sample dataset at [HuggingFaceH4/numina-deepseek-r1-qwen-7b](https://huggingface.co/datasets/HuggingFaceH4/numina-deepseek-r1-qwen-7b). ### Generate data from DeepSeek-R1 To run the bigger DeepSeek-R1, we used 2 nodes, each with 8ร—H100 GPUs using the slurm file present in this repo at `slurm/generate.slurm`. 
First, install the dependencies: (for now we need to install the vllm dev wheel that [fixes the R1 cuda graph capture](https://github.com/vllm-project/vllm/commits/221d388cc5a836fa189305785ed7e887cea8b510/csrc/moe/moe_align_sum_kernels.cu)) ```shell pip install https://wheels.vllm.ai/221d388cc5a836fa189305785ed7e887cea8b510/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl --extra-index-url https://download.pytorch.org/whl/cu121 uv pip install "distilabel[vllm,ray,openai]>=1.5.2" ``` And then run the following command: ```shell sbatch slurm/generate.slurm \ --hf-dataset AI-MO/NuminaMath-TIR \ --temperature 0.6 \ --prompt-column problem \ --model deepseek-ai/DeepSeek-R1 \ --hf-output-dataset username/r1-dataset ``` > [!NOTE] > While the job is running, you can set up an SSH tunnel through the cluster login node to access the Ray dashboard from your computer by running `ssh -L 8265:ray_ip_head_node:8265 <login_node>`, then browsing to `http://localhost:8265` ### Data decontamination Following [s1: Simple test-time scaling](https://huggingface.co/papers/2501.19393), the data can be decontaminated using the script at [scripts/decontaminate.py](./scripts/decontaminate.py), which decontaminates a dataset using 8-grams and deduplicates the data. Sample run: ```shell python scripts/decontaminate.py \ --dataset "open-r1/verifiable-coding-problems-python" \ --problem_column problem \ --cleanup ``` It will decontaminate against the benchmark datasets, and remove the contaminated samples afterwards. If no argument `--new_dataset_name` is provided, the same dataset name will be reused, adding a `_decontaminated` suffix. It runs against the prompt, which for this dataset is the column `problem`, but a different one can be provided. 
Arguments for the script: ```shell usage: decontaminate.py [-h] --dataset DATASET [--split SPLIT] [--ngram_size NGRAM_SIZE] [--problem_column PROBLEM_COLUMN] [--cleanup] [--new_dataset_name NEW_DATASET_NAME] options: -h, --help show this help message and exit --dataset DATASET Name of the dataset to check for contamination. --split SPLIT Split to check for contamination, defaults to `train`. --ngram_size NGRAM_SIZE Size of n-grams to build, defaults to 8. --problem_column PROBLEM_COLUMN Name of the column containing the problem (prompt). --cleanup Whether to remove the contaminated rows before pushing the dataset. --new_dataset_name NEW_DATASET_NAME New name for the dataset. If not provided, will reuse the name and add a `_decontaminated` to the name. ``` ## Contributing Contributions are welcome. Please refer to https://github.com/huggingface/open-r1/issues/23. ## Acknowledgements This project is built with the collective efforts of many groups and individuals in the open AI community. We are especially grateful to the vLLM and SGLang teams for creating high-performance tooling to scale the rollouts of GRPO. We also thank the teams at [OpenThoughts](https://www.open-thoughts.ai), [Prime Intellect](https://www.primeintellect.ai), and [General Reasoning](https://gr.inc) for creating and sharing high-quality datasets for reasoning. ## Citation If you find this project is useful in your own work, please consider citing as follows: ``` @misc{openr1, title = {Open R1: A fully open reproduction of DeepSeek-R1}, url = {https://github.com/huggingface/open-r1}, author = {{Hugging Face}}, month = {January}, year = {2025} } ```
open-r1/README.md/0
{ "file_path": "open-r1/README.md", "repo_id": "open-r1", "token_count": 13809 }
224
## Serving DeepSeek-R1 on 2x8 H100 SLURM nodes with SGLang 1. Set up the environment (adjust for your cuda version): ```bash conda create -n sglang124 python=3.11 conda activate sglang124 pip install torch==2.5.1 --index-url https://download.pytorch.org/whl/cu124 pip install sgl-kernel --force-reinstall --no-deps pip install "sglang[all]>=0.4.2.post4" --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer/ ``` 2. Run the server and wait for the model to load: ```bash sbatch slurm/serve_r1.slurm -m "/fsx/deepseek-r1-checkpoint" -e "sglang124" ``` 3. Run the data generation script: ```bash python scripts/generate_reasoning.py \ --dataset-name "AI-MO/NuminaMath-1.5" \ --output-file "numinamath_r1_generations.jsonl" \ --prompt-column "problem" \ --uuid-column "problem" \ --api-addr "<SGLANG_SERVER_ADDRESS>:39877" \ --num-generations 2 \ --max-tokens 16384 \ --max-concurrent 200 ```
open-r1/slurm/README.md/0
{ "file_path": "open-r1/slurm/README.md", "repo_id": "open-r1", "token_count": 390 }
225
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import sys

import datasets
import transformers
from transformers import set_seed
from transformers.trainer_utils import get_last_checkpoint

from open_r1.configs import GRPOConfig, GRPOScriptArguments
from open_r1.rewards import get_reward_funcs
from open_r1.utils import get_dataset, get_model, get_tokenizer
from open_r1.utils.callbacks import get_callbacks
from open_r1.utils.wandb_logging import init_wandb_training
from trl import GRPOTrainer, ModelConfig, TrlParser, get_peft_config


logger = logging.getLogger(__name__)


def main(script_args, training_args, model_args):
    """Run GRPO training end to end: load data/model, train, save, and optionally evaluate/push.

    Args:
        script_args: ``GRPOScriptArguments`` — dataset selection, prompt column, and reward settings.
        training_args: ``GRPOConfig`` — trainer hyperparameters, logging, checkpointing, and Hub options.
        model_args: ``ModelConfig`` — model name/revision, dtype, and PEFT options.

    Raises:
        ValueError: if the configured prompt column is missing from a dataset example.
    """
    # Set seed for reproducibility
    set_seed(training_args.seed)

    ###############
    # Setup logging
    ###############
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    # Mirror the process log level into the datasets/transformers loggers so all
    # libraries emit at a consistent verbosity per rank.
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process a small summary
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Model parameters {model_args}")
    logger.info(f"Script parameters {script_args}")
    logger.info(f"Training parameters {training_args}")

    # Check for last checkpoint so an interrupted run can resume automatically
    # (only when the user did not pass an explicit resume path).
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
    if last_checkpoint is not None and training_args.resume_from_checkpoint is None:
        logger.info(f"Checkpoint detected, resuming training at {last_checkpoint=}.")

    if "wandb" in training_args.report_to:
        init_wandb_training(training_args)

    # Load the dataset
    dataset = get_dataset(script_args)

    ################
    # Load tokenizer
    ################
    tokenizer = get_tokenizer(model_args, training_args)

    ##############
    # Load model #
    ##############
    logger.info("*** Loading model ***")
    model = get_model(model_args, training_args)

    # Get reward functions from the registry
    reward_funcs = get_reward_funcs(script_args)

    # Format into conversation.
    # NOTE: the `prompt_column` default is bound once at function-definition time
    # from `script_args`, which is intended here (it is a closure-level constant).
    def make_conversation(example, prompt_column: str = script_args.dataset_prompt_column):
        prompt = []

        if training_args.system_prompt is not None:
            prompt.append({"role": "system", "content": training_args.system_prompt})

        if prompt_column not in example:
            raise ValueError(f"Dataset Question Field Error: {prompt_column} is not supported.")

        prompt.append({"role": "user", "content": example[prompt_column]})
        return {"prompt": prompt}

    dataset = dataset.map(make_conversation)

    # Drop any pre-existing `messages` column — presumably SFT-style chat data
    # that would conflict with the `prompt` field GRPOTrainer consumes (TODO confirm).
    for split in dataset:
        if "messages" in dataset[split].column_names:
            dataset[split] = dataset[split].remove_columns("messages")

    #############################
    # Initialize the GRPO trainer
    #############################
    trainer = GRPOTrainer(
        model=model,
        reward_funcs=reward_funcs,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=(dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None),
        peft_config=get_peft_config(model_args),
        callbacks=get_callbacks(training_args, model_args),
        processing_class=tokenizer,
    )

    ###############
    # Training loop
    ###############
    logger.info("*** Train ***")
    # An explicit --resume_from_checkpoint takes priority over the auto-detected one.
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics = train_result.metrics
    metrics["train_samples"] = len(dataset[script_args.dataset_train_split])
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()

    ##################################
    # Save model and create model card
    ##################################
    logger.info("*** Save model ***")
    # Align the model's generation config with the tokenizer's eos token
    # to avoid unbounded generation in the transformers `pipeline()` function
    trainer.model.generation_config.eos_token_id = tokenizer.eos_token_id
    trainer.save_model(training_args.output_dir)
    logger.info(f"Model saved to {training_args.output_dir}")

    # Save everything else on main process
    kwargs = {
        "dataset_name": script_args.dataset_name,
        "tags": ["open-r1"],
    }
    if trainer.accelerator.is_main_process:
        trainer.create_model_card(**kwargs)
        # Restore k,v cache for fast inference
        trainer.model.config.use_cache = True
        trainer.model.config.save_pretrained(training_args.output_dir)

    ##########
    # Evaluate
    ##########
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        metrics["eval_samples"] = len(dataset[script_args.dataset_test_split])
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    #############
    # push to hub
    #############
    if training_args.push_to_hub:
        logger.info("Pushing to hub...")
        trainer.push_to_hub(**kwargs)


if __name__ == "__main__":
    parser = TrlParser((GRPOScriptArguments, GRPOConfig, ModelConfig))
    script_args, training_args, model_args = parser.parse_args_and_config()
    main(script_args, training_args, model_args)
open-r1/src/open_r1/grpo.py/0
{ "file_path": "open-r1/src/open_r1/grpo.py", "repo_id": "open-r1", "token_count": 2424 }
226
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import re from concurrent.futures import Future from transformers import AutoConfig from huggingface_hub import ( create_branch, create_repo, get_safetensors_metadata, list_repo_commits, list_repo_files, list_repo_refs, repo_exists, upload_folder, ) from trl import GRPOConfig, SFTConfig logger = logging.getLogger(__name__) def push_to_hub_revision(training_args: SFTConfig | GRPOConfig, extra_ignore_patterns=[]) -> Future: """Pushes the model to branch on a Hub repo.""" # Create a repo if it doesn't exist yet repo_url = create_repo(repo_id=training_args.hub_model_id, private=True, exist_ok=True) # Get initial commit to branch from initial_commit = list_repo_commits(training_args.hub_model_id)[-1] # Now create the branch we'll be pushing to create_branch( repo_id=training_args.hub_model_id, branch=training_args.hub_model_revision, revision=initial_commit.commit_id, exist_ok=True, ) logger.info(f"Created target repo at {repo_url}") logger.info(f"Pushing to the Hub revision {training_args.hub_model_revision}...") ignore_patterns = ["checkpoint-*", "*.pth"] ignore_patterns.extend(extra_ignore_patterns) future = upload_folder( repo_id=training_args.hub_model_id, folder_path=training_args.output_dir, revision=training_args.hub_model_revision, commit_message=f"Add {training_args.hub_model_revision} checkpoint", 
ignore_patterns=ignore_patterns, run_as_future=True, ) logger.info(f"Pushed to {repo_url} revision {training_args.hub_model_revision} successfully!") return future def check_hub_revision_exists(training_args: SFTConfig | GRPOConfig): """Checks if a given Hub revision exists.""" if repo_exists(training_args.hub_model_id): if training_args.push_to_hub_revision is True: # First check if the revision exists revisions = [rev.name for rev in list_repo_refs(training_args.hub_model_id).branches] # If the revision exists, we next check it has a README file if training_args.hub_model_revision in revisions: repo_files = list_repo_files( repo_id=training_args.hub_model_id, revision=training_args.hub_model_revision, ) if "README.md" in repo_files and training_args.overwrite_hub_revision is False: raise ValueError( f"Revision {training_args.hub_model_revision} already exists. " "Use --overwrite_hub_revision to overwrite it." ) def get_param_count_from_repo_id(repo_id: str) -> int: """Function to get model param counts from safetensors metadata or find patterns like 42m, 1.5b, 0.5m or products like 8x7b in a repo ID.""" try: metadata = get_safetensors_metadata(repo_id) return list(metadata.parameter_count.values())[0] except Exception: # Pattern to match products (like 8x7b) and single values (like 42m) pattern = r"((\d+(\.\d+)?)(x(\d+(\.\d+)?))?)([bm])" matches = re.findall(pattern, repo_id.lower()) param_counts = [] for full_match, number1, _, _, number2, _, unit in matches: if number2: # If there's a second number, it's a product number = float(number1) * float(number2) else: # Otherwise, it's a single value number = float(number1) if unit == "b": number *= 1_000_000_000 # Convert to billion elif unit == "m": number *= 1_000_000 # Convert to million param_counts.append(number) if len(param_counts) > 0: # Return the largest number return int(max(param_counts)) else: # Return -1 if no match found return -1 def get_gpu_count_for_vllm(model_name: str, revision: str = "main", 
num_gpus: int = 8) -> int: """vLLM enforces a constraint that the number of attention heads must be divisible by the number of GPUs and 64 must be divisible by the number of GPUs. This function calculates the number of GPUs to use for decoding based on the number of attention heads in the model. """ config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=True) # Get number of attention heads num_heads = config.num_attention_heads # Reduce num_gpus so that num_heads is divisible by num_gpus and 64 is divisible by num_gpus while num_heads % num_gpus != 0 or 64 % num_gpus != 0: logger.info(f"Reducing num_gpus from {num_gpus} to {num_gpus - 1} to make num_heads divisible by num_gpus") num_gpus -= 1 return num_gpus
open-r1/src/open_r1/utils/hub.py/0
{ "file_path": "open-r1/src/open_r1/utils/hub.py", "repo_id": "open-r1", "token_count": 2201 }
227
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Adapter injection With PEFT, you can inject trainable adapters into any `torch` module which allows you to use adapter methods without relying on the modeling classes in PEFT. This works for all adapters except for those based on prompt learning (e.g. prefix tuning or p-tuning). Check the table below to see when you should inject adapters. | Pros | Cons | |---|---| | the model is modified inplace, keeping all the original attributes and methods | manually write the `from_pretrained` and `save_pretrained` utility functions from Hugging Face to save and load adapters | | works for any `torch` module and modality | doesn't work with any of the utility methods provided by `PeftModel` such as disabling and merging adapters | ## Creating a new PEFT model To perform the adapter injection, use the [`inject_adapter_in_model`] method. This method takes 3 arguments, the PEFT config, the model, and an optional adapter name. You can also attach multiple adapters to the model if you call [`inject_adapter_in_model`] multiple times with different adapter names. 
For example, to inject LoRA adapters into the `linear` submodule of the `DummyModel` module: ```python import torch from peft import inject_adapter_in_model, LoraConfig class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding = torch.nn.Embedding(10, 10) self.linear = torch.nn.Linear(10, 10) self.lm_head = torch.nn.Linear(10, 10) def forward(self, input_ids): x = self.embedding(input_ids) x = self.linear(x) x = self.lm_head(x) return x lora_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", target_modules=["linear"], ) model = DummyModel() model = inject_adapter_in_model(lora_config, model) dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]) dummy_outputs = model(dummy_inputs) ``` Print the model to see that the adapters have been correctly injected. ```bash DummyModel( (embedding): Embedding(10, 10) (linear): Linear( in_features=10, out_features=10, bias=True (lora_dropout): ModuleDict( (default): Dropout(p=0.1, inplace=False) ) (lora_A): ModuleDict( (default): Linear(in_features=10, out_features=64, bias=False) ) (lora_B): ModuleDict( (default): Linear(in_features=64, out_features=10, bias=False) ) (lora_embedding_A): ParameterDict() (lora_embedding_B): ParameterDict() ) (lm_head): Linear(in_features=10, out_features=10, bias=True) ) ``` ### Injection based on a `state_dict` Sometimes, it is possible that there is a PEFT adapter checkpoint but the corresponding PEFT config is not known for whatever reason. To inject the PEFT layers for this checkpoint, you would usually have to reverse-engineer the corresponding PEFT config, most notably the `target_modules` argument, based on the `state_dict` from the checkpoint. This can be cumbersome and error prone. To avoid this, it is also possible to call [`inject_adapter_in_model`] and pass the loaded `state_dict` as an argument: ```python from safetensors.torch import load_file model = ... 
state_dict = load_file(<path-to-safetensors-file>)
lora_config = LoraConfig(...)
model = inject_adapter_in_model(lora_config, model, state_dict=state_dict)
```

In this case, PEFT will use the `state_dict` as reference for which layers to target instead of using the PEFT config. As a user, you don't have to set the exact `target_modules` of the PEFT config for this to work. However, you should still pass a PEFT config of the right type, in this example `LoraConfig`; you can leave the `target_modules` as `None`.

Be aware that this still only creates the uninitialized PEFT layers, the values from the `state_dict` are not used to populate the model weights. To populate the weights, proceed with calling [`set_peft_model_state_dict`] as described below.

⚠️ Note that if there is a mismatch between what is configured in the PEFT config and what is found in the `state_dict`, PEFT will warn you about this. You can ignore the warning if you know that the PEFT config is not correctly specified.

> [!WARNING]
> If the original PEFT adapter was using `target_parameters` instead of `target_modules`, injecting from a `state_dict` will not work correctly. In this case, it is mandatory to use the correct PEFT config for injection.

## Saving the model

To only save the adapter, use the [`get_peft_model_state_dict`] function:

```python
from peft import get_peft_model_state_dict

peft_state_dict = get_peft_model_state_dict(model)
print(peft_state_dict)
```

Otherwise, `model.state_dict()` returns the full state dict of the model. 
## Loading the model After loading the saved `state_dict`, it can be applied using the [`set_peft_model_state_dict`] function: ```python from peft import set_peft_model_state_dict model = DummyModel() model = inject_adapter_in_model(lora_config, model) outcome = set_peft_model_state_dict(model, peft_state_dict) # check that there were no wrong keys print(outcome.unexpected_keys) ``` If injecting the adapter is slow or you need to load a large number of adapters, you may use an optimization that allows to create an "empty" adapter on meta device and only fills the weights with real weights when the [`set_peft_model_state_dict`] is called. To do this, pass `low_cpu_mem_usage=True` to both [`inject_adapter_in_model`] and [`set_peft_model_state_dict`]. ```python model = DummyModel() model = inject_adapter_in_model(lora_config, model, low_cpu_mem_usage=True) print(model.linear.lora_A["default"].weight.device.type == "meta") # should be True set_peft_model_state_dict(model, peft_state_dict, low_cpu_mem_usage=True) print(model.linear.lora_A["default"].weight.device.type == "cpu") # should be True ```
peft/docs/source/developer_guides/low_level_api.md/0
{ "file_path": "peft/docs/source/developer_guides/low_level_api.md", "repo_id": "peft", "token_count": 2073 }
228
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PEFT integrations PEFT's practical benefits extends to other Hugging Face libraries like [Diffusers](https://hf.co/docs/diffusers) and [Transformers](https://hf.co/docs/transformers). One of the main benefits of PEFT is that an adapter file generated by a PEFT method is a lot smaller than the original model, which makes it super easy to manage and use multiple adapters. You can use one pretrained base model for multiple tasks by simply loading a new adapter finetuned for the task you're solving. Or you can combine multiple adapters with a text-to-image diffusion model to create new effects. This tutorial will show you how PEFT can help you manage adapters in Diffusers and Transformers. ## Diffusers Diffusers is a generative AI library for creating images and videos from text or images with diffusion models. LoRA is an especially popular training method for diffusion models because you can very quickly train and share diffusion models to generate images in new styles. To make it easier to use and try multiple LoRA models, Diffusers uses the PEFT library to help manage different adapters for inference. 
For example, load a base model and then load the [artificialguybr/3DRedmond-V1](https://huggingface.co/artificialguybr/3DRedmond-V1) adapter for inference with the [`load_lora_weights`](https://huggingface.co/docs/diffusers/v0.24.0/en/api/loaders/lora#diffusers.loaders.LoraLoaderMixin.load_lora_weights) method. The `adapter_name` argument in the loading method is enabled by PEFT and allows you to set a name for the adapter so it is easier to reference. ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "peft-internal-testing/artificialguybr__3DRedmond-V1", weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="3d" ) image = pipeline("sushi rolls shaped like kawaii cat faces").images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers.png"/> </div> Now let's try another cool LoRA model, [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora). All you need to do is load and name this new adapter with `adapter_name`, and use the [`set_adapters`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.set_adapters) method to set it as the currently active adapter. 
```py pipeline.load_lora_weights( "ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors", adapter_name="cereal" ) pipeline.set_adapters("cereal") image = pipeline("sushi rolls shaped like kawaii cat faces").images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers-2.png"/> </div> Finally, you can call the [`disable_lora`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.disable_lora) method to restore the base model. ```py pipeline.disable_lora() ``` Learn more about how PEFT supports Diffusers in the [Inference with PEFT](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference) tutorial. ## Transformers ๐Ÿค— [Transformers](https://hf.co/docs/transformers) is a collection of pretrained models for all types of tasks in all modalities. You can load these models for training or inference. Many of the models are large language models (LLMs), so it makes sense to integrate PEFT with Transformers to manage and train adapters. Load a base pretrained model to train. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") ``` Next, add an adapter configuration to specify how to adapt the model parameters. Call the [`~PeftModel.add_adapter`] method to add the configuration to the base model. ```py from peft import LoraConfig peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", task_type="CAUSAL_LM" ) model.add_adapter(peft_config) ``` Now you can train the model with Transformer's [`~transformers.Trainer`] class or whichever training framework you prefer. To use the newly trained model for inference, the [`~transformers.AutoModel`] class uses PEFT on the backend to load the adapter weights and configuration file into a base pretrained model. 
```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("peft-internal-testing/opt-350m-lora")
```

Alternatively, you can use transformers [Pipelines](https://huggingface.co/docs/transformers/en/main_classes/pipelines) to load the model for conveniently running inference:

```py
from transformers import pipeline

model = pipeline("text-generation", "peft-internal-testing/opt-350m-lora")
print(model("Hello World"))
```

If you're interested in comparing or using more than one adapter, you can call the [`~PeftModel.add_adapter`] method to add the adapter configuration to the base model. The only requirement is the adapter type must be the same (you can't mix a LoRA and LoHa adapter).

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model.add_adapter(lora_config_1, adapter_name="adapter_1")
```

Call [`~PeftModel.add_adapter`] again to attach a new adapter to the base model.

```py
model.add_adapter(lora_config_2, adapter_name="adapter_2")
```

Then you can use [`~PeftModel.set_adapter`] to set the currently active adapter.

```py
model.set_adapter("adapter_1")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

To disable the adapter, call the [disable_adapters](https://github.com/huggingface/transformers/blob/4e3490f79b40248c53ee54365a9662611e880892/src/transformers/integrations/peft.py#L313) method.

```py
model.disable_adapters()
```

The [enable_adapters](https://github.com/huggingface/transformers/blob/4e3490f79b40248c53ee54365a9662611e880892/src/transformers/integrations/peft.py#L336) method can be used to enable the adapters again.

If you're curious, check out the [Load and train adapters with PEFT](https://huggingface.co/docs/transformers/main/peft) tutorial to learn more.
peft/docs/source/tutorial/peft_integrations.md/0
{ "file_path": "peft/docs/source/tutorial/peft_integrations.md", "repo_id": "peft", "token_count": 2255 }
229
<jupyter_start><jupyter_text>Peft model evaluation using [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness)In this notebook, we are going to learn how to evaluate the finetuned lora model on the hellaswag task using lm-eval-harness toolkit.<jupyter_code># Install LM-Eval !pip install -q datasets evaluate lm_eval<jupyter_output>[notice] A new release of pip is available: 24.0 -> 24.3.1 [notice] To update, run: python.exe -m pip install --upgrade pip<jupyter_text>First we will check the accuracy score on the hellaswag task for the base bert without finetuning<jupyter_code>import lm_eval output = lm_eval.simple_evaluate(model = 'hf', model_args = { 'pretrained' : 'bert-base-cased', 'dtype' : 'bfloat16'}, tasks = 'hellaswag', device = 'cuda:0', batch_size = 128, log_samples = False) output["results"]<jupyter_output>2024-11-01:20:45:03,210 INFO [evaluator.py:164] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 | Setting fewshot manual seed to 1234 2024-11-01:20:45:03,211 INFO [evaluator.py:188] Initializing hf model, with arguments: {'pretrained': 'bert-base-cased', 'dtype': 'bfloat16'} 2024-11-01:20:45:03,213 INFO [huggingface.py:129] Using device 'cuda:0' 2024-11-01:20:45:03,450 INFO [huggingface.py:481] Using model type 'default' 2024-11-01:20:45:03,741 INFO [huggingface.py:365] Model parallel was set to False, max memory was not set, and device map was set to {'': 'cuda:0'} If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.` 2024-11-01:20:45:15,862 INFO [task.py:415] Building contexts for hellaswag on rank 0... 
100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 10042/10042 [00:02<00:00, 4477.77it/s] 2024-11-01:20:45:18,875 INFO [evaluator.py:489] Running loglikelihood requests Running loglikelihood requests: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 40168/40[...]<jupyter_text>Now lets try to finetune the bert on the imdb dataset (this is for demonstration and finetuning on imdb may not increase the scores on hellaswag task)<jupyter_code># Import necessary libraries import evaluate import numpy as np from datasets import load_dataset from transformers import AutoTokenizer, BertForSequenceClassification, Trainer, TrainingArguments from peft import LoraConfig, TaskType, get_peft_model # Configure LoRA for Sequence Classification lora_config = LoraConfig( task_type=TaskType.SEQ_CLS, # Set task type to sequence classification target_modules=["query", "key"] # Specify target modules for LoRA tuning ) # Initialize the BERT model for sequence classification model = BertForSequenceClassification.from_pretrained( 'bert-base-cased', num_labels = 2 ) # Wrap the model with LoRA configuration model = get_peft_model(model, lora_config) model.print_trainable_parameters() tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # load the dataset dataset = load_dataset("imdb") def tokenize_function(row): return tokenizer(row["text"], padding="max_length", truncation = True) tokenized_datasets = dataset.map(tokenize_function, batched = True) train_dataset = tokenized_datasets["train"] eval_dataset = tokenized_datasets["test"] # Define a function to compute evaluation metrics def compute_metrics(eval_pred): logits, labels = eval_pred predictions = np.argmax(logits, axis=-1) metric = evaluate.load("accuracy") return metric.compute(predictions = predictions, references = labels) # Configure training arguments training_args = TrainingArguments("bert-lora-imdb", eval_strategy="epoch", per_device_train_batch_size=32, # decrease this for OOM error per_device_eval_batch_size=64, save_strategy="epoch", 
learning_rate=2e-3, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, do_eval=True, do_predict=True, metric_for_best_model="accuracy", report_to="none") # Initialize the Trainer for the model training loop trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, ) #start training trainer.train()<jupyter_output>13%|โ–ˆโ–Ž | 500/3910 [08:16<56:48, 1.00it/s]<jupyter_text>Now take the finetuned lora checkpoint and check the accuracy score on hellaswag task.<jupyter_code># use the path of your checkpoint here output = lm_eval.simple_evaluate(model = 'hf', model_args = { 'pretrained' : 'bert-base-cased', 'peft' : './bert-lora-imdb/checkpoint-3910', 'dtype' : 'bfloat16'}, tasks = 'hellaswag', device = 'cuda:0', batch_size = 128, log_samples = False) output["results"]<jupyter_output>2024-11-01:23:37:57,640 INFO [evaluator.py:164] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 | Setting fewshot manual seed to 1234 2024-11-01:23:37:57,641 INFO [evaluator.py:188] Initializing hf model, with arguments: {'pretrained': 'bert-base-cased', 'peft': './bert-lora-imdb/checkpoint-3910', 'dtype': 'bfloat16'} 2024-11-01:23:37:57,643 INFO [huggingface.py:129] Using device 'cuda:0' 2024-11-01:23:37:57,891 INFO [huggingface.py:481] Using model type 'default' 2024-11-01:23:37:58,161 INFO [huggingface.py:365] Model parallel was set to False, max memory was not set, and device map was set to {'': 'cuda:0'} If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.` 2024-11-01:23:38:10,295 INFO [task.py:415] Building contexts for hellaswag on rank 0... 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 10042/10042 [00:02<00:00, 4453.89it/s] 2024-11-01:23:38:13,313 INFO [evaluator.py:489] Running loglikelihood requests Running logli[...]
peft/examples/evaluation/lora-lm-eval.ipynb/0
{ "file_path": "peft/examples/evaluation/lora-lm-eval.ipynb", "repo_id": "peft", "token_count": 2454 }
230
<jupyter_start><jupyter_text>IntroductionIn this notebook, we will learn how to use [LoRA](https://huggingface.co/papers/2106.09685) from ๐Ÿค— PEFT to fine-tune an image classification model by ONLY using **0.77%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://huggingface.co/papers/2106.09685). Let's get started by installing the dependencies. __*Note that this notebook builds on top the [official image classification example notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb).*__ Install dependenciesHere we're installing `peft` from source to ensure we have access to all the bleeding edge features of `peft`.<jupyter_code>!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q<jupyter_output>Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.toml) ... 
[?25l[?25hdone  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 6.3/6.3 MB 53.1 MB/s eta 0:00:00  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 199.7/199.7 KB 24.5 MB/s eta 0:00:00  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 81.4/81.4 KB 11.3 MB/s eta 0:00:00  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 462.8/462.8 KB 46.9 MB/s eta 0:00:00  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 190.3/190.3 KB 23.1 MB/s eta 0:00:00  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 7.6/7.6 MB 102.9 MB/s eta 0:00:00  โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ” 213.0/213.0 KB 25.4 MB/s eta [[...]<jupyter_text>AuthenticationWe will share our fine-tuned model at the end of training. So, to do that we just authenticate using our ๐Ÿค— token. This token is available from [here](https://huggingface.co/settings/tokens). If you don't have a ๐Ÿค— account already, we highly encourage you to do so; it's free!<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output>Token is valid. Your token has been saved in your configured git credential helpers (store). 
Your token has been saved to /root/.cache/huggingface/token Login successful<jupyter_text>Check the library versions<jupyter_code>import transformers import accelerate import peft print(f"Transformers version: {transformers.__version__}") print(f"Accelerate version: {accelerate.__version__}") print(f"PEFT version: {peft.__version__}")<jupyter_output>Transformers version: 4.26.0 Accelerate version: 0.16.0 PEFT version: 0.1.0.dev0<jupyter_text>Select a model checkpoint to fine-tune<jupyter_code>model_checkpoint = "google/vit-base-patch16-224-in21k" # pre-trained model from which to fine-tune<jupyter_output><empty_output><jupyter_text>Load a datasetWe're only loading the first 5000 instances from the training set of the [Food-101 dataset](https://huggingface.co/datasets/food101) to keep this example runtime short.<jupyter_code>from datasets import load_dataset dataset = load_dataset("food101", split="train[:5000]")<jupyter_output><empty_output><jupyter_text>Prepare datasets for training and evaluation 1. Prepare `label2id` and `id2label` dictionaries. This will come in handy when performing inference and for metadata information.<jupyter_code>labels = dataset.features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = i id2label[i] = label id2label[2]<jupyter_output><empty_output><jupyter_text>2. We load the image processor of the model we're fine-tuning.<jupyter_code>from transformers import AutoImageProcessor image_processor = AutoImageProcessor.from_pretrained(model_checkpoint) image_processor<jupyter_output><empty_output><jupyter_text>As one might notice, the `image_processor` has useful information on which size the training and evaluation images should be resized, stats that should be used to normalize the pixel values, etc. 3. Using the image processor we prepare transformation functions for the datasets. 
These functions will include augmentation and pixel scaling.<jupyter_code>from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) train_transforms = Compose( [ RandomResizedCrop(image_processor.size["height"]), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(image_processor.size["height"]), CenterCrop(image_processor.size["height"]), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """Apply train_transforms across a batch.""" example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch def preprocess_val(example_batch): """Apply val_transforms across a batch.""" example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch<jupyter_output><empty_output><jupyter_text>4. We split our mini dataset into training and validation.<jupyter_code># split up training into training + validation splits = dataset.train_test_split(test_size=0.1) train_ds = splits["train"] val_ds = splits["test"]<jupyter_output><empty_output><jupyter_text>5. We set the transformation functions to the datasets accordingly.<jupyter_code>train_ds.set_transform(preprocess_train) val_ds.set_transform(preprocess_val)<jupyter_output><empty_output><jupyter_text>Load and prepare a model In this section, we first load the model we want to fine-tune.<jupyter_code>def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}" )<jupyter_output><empty_output><jupyter_text>The `get_peft_model()` method that we will use in a moment wraps the original model to be fine-tuned as a `PeftModel`. So, it's important for us to initialize the original model correctly. As such, we initialize it by specifying the `label2id` and `id2label` so that `AutoModelForImageClassification` can initialize a append classification head to the underlying model, adapted for our dataset. We can confirm this from the warning below:```Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.weight', 'classifier.bias']```<jupyter_code>from transformers import AutoModelForImageClassification, TrainingArguments, Trainer model = AutoModelForImageClassification.from_pretrained( model_checkpoint, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ) print_trainable_parameters(model)<jupyter_output><empty_output><jupyter_text>Also, take note of the number of total trainable parameters of `model`: it's 100%! 
We'll compare this number to that of the LoRA model.We now use the `PeftModel` to wrap `model` so that the "update" matrices are added to the respective places.<jupyter_code>from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=16, target_modules=["query", "value"], lora_dropout=0.1, bias="none", modules_to_save=["classifier"], ) lora_model = get_peft_model(model, config) print_trainable_parameters(lora_model)<jupyter_output>trainable params: 667493 || all params: 86466149 || trainable%: 0.77<jupyter_text>Let's unpack what's going on here. In order for LoRA to take effect, we need to specify the target modules to `LoraConfig` so that `get_peft_model()` knows which modules inside our model needs to be amended with LoRA matrices. In this case, we're only interested in targetting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" with `query` and `value` respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`. We also specify `modules_to_save`. After we wrap our base model `model` with `get_peft_model()` along with the `config`, we get a new model where only the LoRA parameters are trainable (so-called "update matrices") while the pre-trained parameters are kept frozen. These include the parameters of the randomly initialized classifier parameters too. This is NOT we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Regarding the other parameters:* `r`: The dimension used by the LoRA update matrices.* `alpha`: Scaling factor.* `bias`: Specifying if the `bias` parameters should be trained. `None` denotes none of the `bias` parameters will be trained. 
`r` and `alpha` together control the total number of final trainable parameters when using LoRA giving us the flexbility to balance a trade-off between end performance and compute efficiency. We can also how many parameters we're actually training. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect to notice a less number of trainable parameters from the `lora_model` in comparison to the original `model` which is indeed the case here. Training argumentsWe will leverage [๐Ÿค— Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) for fine-tuning. It accepts several arguments which we wrap using [`TrainingArguments`](https://huggingface.co/docs/transformers/main_classes/trainertransformers.TrainingArguments).<jupyter_code>from transformers import TrainingArguments, Trainer model_name = model_checkpoint.split("/")[-1] batch_size = 128 args = TrainingArguments( f"{model_name}-finetuned-lora-food101", remove_unused_columns=False, eval_strategy="epoch", save_strategy="epoch", learning_rate=5e-3, per_device_train_batch_size=batch_size, gradient_accumulation_steps=4, per_device_eval_batch_size=batch_size, fp16=True, num_train_epochs=5, logging_steps=10, load_best_model_at_end=True, metric_for_best_model="accuracy", push_to_hub=True, label_names=["labels"], )<jupyter_output><empty_output><jupyter_text>Some things to note here:* We're using a larger batch size since there is only a handful of parameters to train. * Larger learning rate than the normal (1e-5 for example). All of these things are a byproduct of the fact that we're training only a small number of parameters. This can potentially also reduce the need to conduct expensive hyperparameter tuning experiments. 
Prepare evaluation metric<jupyter_code>import numpy as np import evaluate metric = evaluate.load("accuracy") # the compute_metrics function takes a Named Tuple as input: # predictions, which are the logits of the model as Numpy arrays, # and label_ids, which are the ground-truth labels as Numpy arrays. def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>Collation functionThis is used by `Trainer` to gather a batch of training and evaluation examples and prepare them in a format that is acceptable by the underlying model.<jupyter_code>import torch def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["label"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels}<jupyter_output><empty_output><jupyter_text>Train and evaluate<jupyter_code>trainer = Trainer( lora_model, args, train_dataset=train_ds, eval_dataset=val_ds, processing_class=image_processor, compute_metrics=compute_metrics, data_collator=collate_fn, ) train_results = trainer.train()<jupyter_output>Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory. WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.<jupyter_text>In just a few minutes, we have a fine-tuned model with 96% validation accuracy. 
Also, note that we used a very small subset of the training dataset which is definitely impacting the results.<jupyter_code>trainer.evaluate(val_ds)<jupyter_output>***** Running Evaluation ***** Num examples = 500 Batch size = 128<jupyter_text>Sharing your model and inference Once the fine-tuning is done, we can share the LoRA parameters with the community like so:<jupyter_code>repo_name = f"sayakpaul/{model_name}-finetuned-lora-food101" lora_model.push_to_hub(repo_name)<jupyter_output>Uploading the following files to sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101: adapter_config.json,adapter_model.bin<jupyter_text>When we call `push_to_hub()` on the `lora_model`, only the LoRA parameters along with any modules specified in `modules_to_save` are saved. If we take a look at the [trained LoRA parameters](https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101/blob/main/adapter_model.bin), we see that it's only **2.6 MB**! This greatly helps with portability especially when we're using a very large model to fine-tune (such as [BLOOM](https://huggingface.co/bigscience/bloom)). Next, we see how to load the LoRA updated parameters along with our base model for inference. When we wrap a base model with `PeftModel` that modifications are DONE in place. 
So to mitigate any concerns that might stem from in place modifications, we newly initialize our base model just like we did earlier and construct our inference model.<jupyter_code>from peft import PeftConfig, PeftModel config = PeftConfig.from_pretrained(repo_name) model = model = AutoModelForImageClassification.from_pretrained( config.base_model_name_or_path, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ) # Load the Lora model inference_model = PeftModel.from_pretrained(model, repo_name)<jupyter_output>loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--google--vit-base-patch16-224-in21k/snapshots/1ba429d32753f33a0660b80ac6f43a3c80c18938/config.json Model config ViTConfig { "_name_or_path": "google/vit-base-patch16-224-in21k", "architectures": [ "ViTModel" ], "attention_probs_dropout_prob": 0.0, "encoder_stride": 16, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_size": 768, "id2label": { "0": "apple_pie", "1": "baby_back_ribs", "2": "baklava", "3": "beef_carpaccio", "4": "beef_tartare", "5": "beet_salad", "6": "beignets", "7": "bibimbap", "8": "bread_pudding", "9": "breakfast_burrito", "10": "bruschetta", "11": "caesar_salad", "12": "cannoli", "13": "caprese_salad", "14": "carrot_cake", "15": "ceviche", "16": "cheesecake", "17": "cheese_plate", "18": "chicken_curry", "19": "chicken_quesadilla", "20": "chicken_wings", "21": "ch[...]<jupyter_text>Don't worry about the warnings, they're harmless. 
Let's now fetch a sample for inference.<jupyter_code>from PIL import Image import requests url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg" image = Image.open(requests.get(url, stream=True).raw) image<jupyter_output><empty_output><jupyter_text>We first instantiate an `image_processor` from the underlying model repo.<jupyter_code>image_processor = AutoImageProcessor.from_pretrained(repo_name)<jupyter_output>loading configuration file preprocessor_config.json from cache at /root/.cache/huggingface/hub/models--sayakpaul--vit-base-patch16-224-in21k-finetuned-lora-food101/snapshots/fa2503cc7d91e0dd69728c1dc66ed80d7bd3289b/preprocessor_config.json Image processor ViTImageProcessor { "do_normalize": true, "do_rescale": true, "do_resize": true, "image_mean": [ 0.5, 0.5, 0.5 ], "image_processor_type": "ViTImageProcessor", "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "rescale_factor": 0.00392156862745098, "size": { "height": 224, "width": 224 } }<jupyter_text>We then prepare the sample for inference.<jupyter_code># prepare image for the model encoding = image_processor(image.convert("RGB"), return_tensors="pt") print(encoding.pixel_values.shape)<jupyter_output>torch.Size([1, 3, 224, 224])<jupyter_text>And run inference!<jupyter_code>import torch # forward pass with torch.no_grad(): outputs = inference_model(**encoding) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", inference_model.config.id2label[predicted_class_idx])<jupyter_output>Predicted class: beignets
peft/examples/image_classification/image_classification_peft_lora.ipynb/0
{ "file_path": "peft/examples/image_classification/image_classification_peft_lora.ipynb", "repo_id": "peft", "token_count": 6375 }
231
"""Convert a PEFT-trained Stable Diffusion LoRA checkpoint to the kohya_ss safetensors format.

Reads the text-encoder and/or UNet LoRA adapters saved by PEFT under `--peft_lora_path`
and writes a single safetensors file whose keys follow the kohya_ss naming convention
(`lora_te_*` / `lora_unet_*`, `lora_down` / `lora_up`, plus per-module `alpha` scalars).
"""

import argparse
import os

import torch
from diffusers import UNet2DConditionModel
from safetensors.torch import save_file
from transformers import CLIPTextModel

from peft import PeftModel, get_peft_model_state_dict


# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L664
LORA_PREFIX_UNET = "lora_unet"
LORA_PREFIX_TEXT_ENCODER = "lora_te"
LORA_ADAPTER_NAME = "default"


def get_module_kohya_state_dict(
    module: PeftModel, prefix: str, dtype: torch.dtype, adapter_name: str = LORA_ADAPTER_NAME
) -> dict[str, torch.Tensor]:
    """Translate one PEFT module's LoRA state dict into kohya_ss key/value conventions.

    Parameters:
        module: The PEFT-wrapped model (text encoder or UNet) whose adapter weights to export.
        prefix: kohya_ss key prefix for this module (LORA_PREFIX_TEXT_ENCODER or LORA_PREFIX_UNET).
        dtype: Target dtype for the exported weights (float16 when --half, else float32).
        adapter_name: Name of the PEFT adapter to export.

    Returns:
        A flat dict mapping kohya_ss-style keys to weight tensors (plus `alpha` entries).
    """
    kohya_ss_state_dict = {}
    for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items():
        # Map PEFT naming onto kohya_ss naming: module prefix and A/B -> down/up.
        kohya_key = peft_key.replace("base_model.model", prefix)
        kohya_key = kohya_key.replace("lora_A", "lora_down")
        kohya_key = kohya_key.replace("lora_B", "lora_up")
        # kohya_ss flattens the module path with underscores, keeping only the last
        # two dots (e.g. "...layer.lora_down.weight").
        kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2)
        kohya_ss_state_dict[kohya_key] = weight.to(dtype)

        # Set alpha parameter
        # One alpha scalar per module, emitted alongside its lora_down weight.
        if "lora_down" in kohya_key:
            alpha_key = f"{kohya_key.split('.')[0]}.alpha"
            kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype)

    return kohya_ss_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--sd_checkpoint",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--sd_checkpoint_revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument("--peft_lora_path", default=None, type=str, required=True, help="Path to peft trained LoRA")
    parser.add_argument(
        "--dump_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output safetensors file for use with webui.",
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    args = parser.parse_args()

    # Store kohya_ss state dict
    kohya_ss_state_dict = {}
    dtype = torch.float16 if args.half else torch.float32

    # Load Text Encoder LoRA model (only if the adapter subfolder exists).
    text_encoder_peft_lora_path = os.path.join(args.peft_lora_path, "text_encoder")
    if os.path.exists(text_encoder_peft_lora_path):
        text_encoder = CLIPTextModel.from_pretrained(
            args.sd_checkpoint, subfolder="text_encoder", revision=args.sd_checkpoint_revision
        )
        text_encoder = PeftModel.from_pretrained(
            text_encoder, text_encoder_peft_lora_path, adapter_name=LORA_ADAPTER_NAME
        )
        kohya_ss_state_dict.update(
            get_module_kohya_state_dict(text_encoder, LORA_PREFIX_TEXT_ENCODER, dtype, LORA_ADAPTER_NAME)
        )

    # Load UNet LoRA model (only if the adapter subfolder exists).
    unet_peft_lora_path = os.path.join(args.peft_lora_path, "unet")
    if os.path.exists(unet_peft_lora_path):
        unet = UNet2DConditionModel.from_pretrained(
            args.sd_checkpoint, subfolder="unet", revision=args.sd_checkpoint_revision
        )
        unet = PeftModel.from_pretrained(unet, unet_peft_lora_path, adapter_name=LORA_ADAPTER_NAME)
        kohya_ss_state_dict.update(get_module_kohya_state_dict(unet, LORA_PREFIX_UNET, dtype, LORA_ADAPTER_NAME))

    # Save state dict
    save_file(
        kohya_ss_state_dict,
        args.dump_path,
    )
peft/examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py/0
{ "file_path": "peft/examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py", "repo_id": "peft", "token_count": 1632 }
232
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Optional

import torch
import transformers
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, set_seed

from peft import (
    LoraConfig,
    get_peft_model,
)


def train(
    base_model: str = "path/to/model",
    data_path: str = "yahma/alpaca-cleaned",
    output_dir: str = "olora",
    batch_size: int = 16,
    num_epochs: int = 1,
    learning_rate: float = 3e-4,
    cutoff_len: int = 256,
    val_set_size: int = 16,
    quantize: bool = False,
    eval_step: int = 100,
    save_step: int = 100,
    device_map: str = "auto",
    lora_r: int = 32,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
    lora_target_modules: list[str] = None,
    torch_dtype: str = "float16",
    init_lora_weights="olora",
    seed: Optional[int] = None,
):
    """Fine-tune a causal LM on an instruction dataset with an OLoRA-initialized LoRA adapter.

    Loads `base_model` (optionally 4-bit quantized via bitsandbytes), wraps it with a
    LoRA adapter using `init_lora_weights` (default "olora"), tokenizes `data_path`
    into prompt/response pairs, trains with `transformers.Trainer`, and saves the
    adapter to `output_dir`.
    """
    # Set device_map to the right place when enabling DDP.
    world_size = int(os.environ.get("WORLD_SIZE", 0)) or int(os.environ.get("PMI_SIZE", 0))
    if world_size > 1 and device_map != "cpu":
        from accelerate import Accelerator

        # Pin each DDP process to its own device.
        device_map = {"": Accelerator().process_index}
    # Set seed
    if seed is not None:
        set_seed(seed)
    model_kwargs = {"torch_dtype": getattr(torch, torch_dtype), "device_map": device_map}
    if quantize:
        # 4-bit NF4 quantization with bf16 compute (QLoRA-style setup).
        model_kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
    model = AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs)

    tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
    # For some tokenizer with no pad token like llama
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    def tokenize(prompt, add_eos_token=True):
        # Tokenize one prompt, appending EOS when there is room under cutoff_len;
        # labels are a copy of input_ids (standard causal-LM training).
        result = tokenizer(
            prompt,
            truncation=True,
            max_length=cutoff_len,
            padding=False,
            return_tensors=None,
        )
        if (
            result["input_ids"][-1] != tokenizer.eos_token_id
            and len(result["input_ids"]) < cutoff_len
            and add_eos_token
        ):
            result["input_ids"].append(tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        result["labels"] = result["input_ids"].copy()

        return result

    def generate_and_tokenize_prompt(example):
        # Render the instruction template, then tokenize it.
        full_prompt = generate_prompt(example)
        tokenized_full_prompt = tokenize(full_prompt)
        return tokenized_full_prompt

    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
        init_lora_weights=init_lora_weights,
    )
    model = get_peft_model(model, config)

    data = load_dataset(data_path)

    # Fixed split seed (42) so train/val membership is reproducible across runs.
    train_val = data["train"].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
    train_data = train_val["train"].shuffle().map(generate_and_tokenize_prompt)
    val_data = train_val["test"].shuffle().map(generate_and_tokenize_prompt)

    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=batch_size,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            logging_steps=100,
            optim="adamw_torch",
            eval_strategy="steps",
            save_strategy="steps",
            eval_steps=eval_step,
            save_steps=save_step,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=True,
            # Only relevant under DDP; None lets Trainer pick the default otherwise.
            ddp_find_unused_parameters=False if world_size > 1 else None,
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
    trainer.train()
    model.save_pretrained(output_dir)


def generate_prompt(example):
    # Alpaca-style instruction template; expects "instruction" and "output" keys.
    return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {example["instruction"]} ### Response: {example["output"]}"""


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--base_model", type=str, default="path/to/model")
    parser.add_argument("--data_path", type=str, default="yahma/alpaca-cleaned")
    parser.add_argument("--output_dir", type=str, default="olora")
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--num_epochs", type=int, default=1)
    parser.add_argument("--learning_rate", type=float, default=3e-4)
    parser.add_argument("--cutoff_len", type=int, default=256)
    parser.add_argument("--val_set_size", type=int, default=16)
    parser.add_argument("--quantize", action="store_true")
    parser.add_argument("--eval_step", type=int, default=100)
    parser.add_argument("--save_step", type=int, default=100)
    parser.add_argument("--device_map", type=str, default="auto")
    parser.add_argument("--lora_r", type=int, default=32)
    parser.add_argument("--lora_alpha", type=int, default=16)
    parser.add_argument("--lora_dropout", type=float, default=0.05)
    # NOTE(review): parsed as a single string, not a list — presumably PEFT accepts a
    # str for target_modules; confirm comma-separated lists are handled upstream.
    parser.add_argument("--lora_target_modules", type=str, default=None)
    parser.add_argument("--torch_dtype", type=str, default="float16")
    parser.add_argument("--init_lora_weights", type=str, default="olora")
    parser.add_argument("--seed", type=int, default=None)

    args = parser.parse_args()

    train(
        base_model=args.base_model,
        data_path=args.data_path,
        output_dir=args.output_dir,
        batch_size=args.batch_size,
        num_epochs=args.num_epochs,
        learning_rate=args.learning_rate,
        cutoff_len=args.cutoff_len,
        val_set_size=args.val_set_size,
        quantize=args.quantize,
        eval_step=args.eval_step,
        save_step=args.save_step,
        device_map=args.device_map,
        lora_r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        lora_target_modules=args.lora_target_modules,
        torch_dtype=args.torch_dtype,
        init_lora_weights=args.init_lora_weights,
        seed=args.seed,
    )
peft/examples/olora_finetuning/olora_finetuning.py/0
{ "file_path": "peft/examples/olora_finetuning/olora_finetuning.py", "repo_id": "peft", "token_count": 3088 }
233
"""
This example demonstrates loading of LoRA adapter (via PEFT) into an FP8 INC-quantized FLUX model.

More info on Intel Neural Compressor (INC) FP8 quantization is available at:
https://github.com/intel/neural-compressor/tree/master/examples/helloworld/fp8_example

Requirements:
pip install optimum-habana sentencepiece neural-compressor[pt] peft
"""

import importlib

import torch
from neural_compressor.torch.quantization import FP8Config, convert, finalize_calibration, prepare


# Checks if HPU device is available
# Adapted from https://github.com/huggingface/accelerate/blob/b451956fd69a135efc283aadaa478f0d33fcbe6a/src/accelerate/utils/imports.py#L435
def is_hpu_available():
    """Return True when the Habana (HPU) torch backend is importable and a device is present."""
    if (
        importlib.util.find_spec("habana_frameworks") is None
        or importlib.util.find_spec("habana_frameworks.torch") is None
    ):
        return False

    # Importing habana_frameworks.torch registers the "hpu" backend on torch.
    import habana_frameworks.torch  # noqa: F401

    return hasattr(torch, "hpu") and torch.hpu.is_available()


# Ensure HPU device is available before proceeding
if is_hpu_available():
    from optimum.habana.diffusers import GaudiFluxPipeline
else:
    raise RuntimeError("HPU device not found. This code requires Intel Gaudi device to run.")

# Example: FLUX model inference on HPU via optimum-habana pipeline
hpu_configs = {
    "use_habana": True,
    "use_hpu_graphs": True,
    "sdp_on_bf16": True,
    "gaudi_config": "Habana/stable-diffusion",
}
pipe = GaudiFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, **hpu_configs)
prompt = "A picture of sks dog in a bucket"

# Quantize FLUX transformer to FP8 using INC (Intel Neural Compressor)
quant_configs = {
    "mode": "AUTO",
    "observer": "maxabs",
    "scale_method": "maxabs_hw",
    "allowlist": {"types": [], "names": []},
    "blocklist": {"types": [], "names": []},
    "dump_stats_path": "/tmp/hqt_output/measure",
}
config = FP8Config(**quant_configs)
pipe.transformer = prepare(pipe.transformer, config)
# Calibration pass: run the pipeline once so observers record activation ranges.
pipe(prompt)
finalize_calibration(pipe.transformer)
pipe.transformer = convert(pipe.transformer)

# Load LoRA weights with PEFT
pipe.load_lora_weights("dsocek/lora-flux-dog", adapter_name="user_lora")

# Run inference
image = pipe(prompt).images[0]
image.save("dog.png")
peft/examples/stable_diffusion/inc_flux_lora_hpu.py/0
{ "file_path": "peft/examples/stable_diffusion/inc_flux_lora_hpu.py", "repo_id": "peft", "token_count": 815 }
234
{ "auto_mapping": null, "base_model_name_or_path": null, "bias": "none", "exclude_modules": null, "inference_mode": false, "init_weights": "bat", "layers_pattern": null, "layers_to_transform": null, "modules_to_save": null, "peft_type": "BONE", "r": 64, "revision": null, "target_modules": [ "v_proj", "q_proj" ], "task_type": null }
peft/method_comparison/MetaMathQA/experiments/bone/llama-3.2-3B-bat/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/bone/llama-3.2-3B-bat/adapter_config.json", "repo_id": "peft", "token_count": 166 }
235
{ "optimizer_kwargs": { "lr": 0.2 } }
peft/method_comparison/MetaMathQA/experiments/trainable_tokens/llama-3.2-3B-sos+eos/training_params.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/trainable_tokens/llama-3.2-3B-sos+eos/training_params.json", "repo_id": "peft", "token_count": 27 }
236
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data handling utilities for PEFT benchmarking.
"""

import json
import os
from typing import Optional

from transformers import PreTrainedTokenizer
from utils import BenchmarkConfig


# Default prompt file shipped next to this module.
DEFAULT_PROMPTS_PATH = os.path.join(os.path.dirname(__file__), "configs", "prompts.json")


def load_test_prompts(config: BenchmarkConfig) -> dict[str, list[str]]:
    """
    Load prompts from JSON file.

    Args:
        config: Configuration object; its optional `prompts_file` attribute overrides
            DEFAULT_PROMPTS_PATH. Note access is via `getattr`, so a config without
            the attribute silently falls back to the default path.

    Returns:
        dictionary with prompts by category
    """
    prompts_file = getattr(config, "prompts_file", DEFAULT_PROMPTS_PATH)

    with open(prompts_file) as f:
        prompts = json.load(f)

    return prompts


def truncate_prompt_for_model(
    prompt: str,
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    reserve_output_tokens: int = 50,
) -> str:
    """
    Truncate a prompt to fit within the model's context window.

    Args:
        prompt: Input prompt
        tokenizer: Model tokenizer
        max_length: Maximum sequence length (if None, uses model's max_length)
        reserve_output_tokens: Number of tokens to reserve for response

    Returns:
        Truncated prompt (re-decoded from the truncated token ids; decoding may not
        reproduce the original text byte-for-byte)
    """
    if max_length is None:
        # Fall back to the tokenizer's advertised context size, else a fixed 2048.
        if hasattr(tokenizer, "model_max_length"):
            max_length = tokenizer.model_max_length
        else:
            max_length = 2048

    max_prompt_length = max_length - reserve_output_tokens

    input_ids = tokenizer.encode(prompt, return_tensors="pt")[0]

    # Short enough already: return unchanged, avoiding a lossy decode round-trip.
    if len(input_ids) <= max_prompt_length:
        return prompt

    truncated_ids = input_ids[:max_prompt_length]
    truncated_prompt = tokenizer.decode(truncated_ids, skip_special_tokens=True)

    return truncated_prompt


def prepare_benchmark_prompts(
    config: BenchmarkConfig,
    tokenizer: PreTrainedTokenizer,
    max_input_length: Optional[int] = None,
    seed: int = 42,
) -> dict[str, list[str]]:
    """
    Prepare prompts for benchmarking, ensuring appropriate length and variety.

    Always returns all prompt categories for consistent benchmarking.

    Args:
        config: Benchmark configuration
        tokenizer: Model tokenizer
        max_input_length: Maximum input length (overrides model default if provided)
        seed: Random seed (kept for backwards compatibility; currently unused)

    Returns:
        Dictionary with processed prompts by category (all categories included)
    """
    all_prompts = load_test_prompts(config)

    processed_prompts = {}
    for category, prompts in all_prompts.items():
        # Truncate every prompt in the category to fit the model context window.
        truncated_prompts = [
            truncate_prompt_for_model(
                prompt,
                tokenizer,
                max_length=max_input_length,
                reserve_output_tokens=getattr(config, "reserve_output_tokens", 50),
            )
            for prompt in prompts
        ]
        processed_prompts[category] = truncated_prompts

    return processed_prompts
{ "file_path": "peft/method_comparison/text_generation_benchmark/data.py", "repo_id": "peft", "token_count": 1299 }
237
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Packaging configuration for the `peft` distribution (see release checklist at the bottom).

from setuptools import find_packages, setup


VERSION = "0.17.2.dev0"

# Optional dependency groups: quality tooling, docs, dev (union), and test.
extras = {}
extras["quality"] = [
    "black",  # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
    "hf-doc-builder",
    "ruff~=0.12.8",
]
extras["docs_specific"] = [
    "black",  # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
    "hf-doc-builder",
]
extras["dev"] = extras["quality"] + extras["docs_specific"]
extras["test"] = extras["dev"] + [
    "pytest",
    "pytest-cov",
    "pytest-xdist",
    "parameterized",
    "datasets",
    "diffusers",
    "scipy",
    "protobuf",
    "sentencepiece",
]

setup(
    name="peft",
    version=VERSION,
    description="Parameter-Efficient Fine-Tuning (PEFT)",
    license_files=["LICENSE"],
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="deep learning",
    license="Apache",
    author="The HuggingFace team",
    author_email="benjamin@huggingface.co",
    url="https://github.com/huggingface/peft",
    package_dir={"": "src"},
    packages=find_packages("src"),
    # Ship the typing marker and the BOFT CUDA extension sources (built at runtime).
    package_data={"peft": ["py.typed", "tuners/boft/fbd/fbd_cuda.cpp", "tuners/boft/fbd/fbd_cuda_kernel.cu"]},
    entry_points={},
    python_requires=">=3.9.0",
    install_requires=[
        "numpy>=1.17",
        "packaging>=20.0",
        "psutil",
        "pyyaml",
        "torch>=1.13.0",
        "transformers",
        "tqdm",
        "accelerate>=0.21.0",
        "safetensors",
        "huggingface_hub>=0.25.0",
    ],
    extras_require=extras,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)

# Release checklist
# 1. Change the version in __init__.py and setup.py to the release version, e.g. from "0.6.1.dev0" to "0.7.0"
# 2. Check if there are any deprecations that need to be addressed for this release by searching for "# TODO" in the code
# 3. Commit these changes with the message: "Release: VERSION", create a PR and merge it.
# 4. Add a tag in git to mark the release: "git tag -a v<VERSION> -m 'Adds tag <VERSION> for pypi' "
#    Push the tag to git:
#      git push --tags origin main
#    It is necessary to work on the original repository, not on a fork.
# 5. Run the following commands in the top-level directory:
#      python setup.py bdist_wheel
#      python setup.py sdist
#    Ensure that you are on the clean and up-to-date main branch (git status --untracked-files=no should not list any
#    files and show the main branch)
# 6. Upload the package to the pypi test server first:
#      twine upload dist/* -r pypitest
# 7. Check that you can install it in a virtualenv by running:
#      pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple peft
# 8. Upload the final version to actual pypi:
#      twine upload dist/* -r pypi
# 9. Add release notes to the tag on https://github.com/huggingface/peft/releases once everything is looking hunky-dory.
#    Check the notes here: https://docs.google.com/document/d/1k-sOIfykuKjWcOIALqjhFKz4amFEp-myeJUJEzNgjoU/edit?usp=sharing
# 10. Update the version in __init__.py, setup.py to the bumped patch version + ".dev0" (e.g. from "0.7.0" to "0.7.1.dev0")
peft/setup.py/0
{ "file_path": "peft/setup.py", "repo_id": "peft", "token_count": 1655 }
238
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.utils import register_peft_method from .config import AdaLoraConfig from .gptq import SVDQuantLinear from .layer import AdaLoraLayer, RankAllocator, SVDLinear from .model import AdaLoraModel __all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "RankAllocator", "SVDLinear", "SVDQuantLinear"] register_peft_method( name="adalora", config_cls=AdaLoraConfig, model_cls=AdaLoraModel, prefix="lora_", is_mixed_compatible=True ) def __getattr__(name): if (name == "SVDLinear8bitLt") and is_bnb_available(): from .bnb import SVDLinear8bitLt return SVDLinear8bitLt if (name == "SVDLinear4bit") and is_bnb_4bit_available(): from .bnb import SVDLinear4bit return SVDLinear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
peft/src/peft/tuners/adalora/__init__.py/0
{ "file_path": "peft/src/peft/tuners/adalora/__init__.py", "repo_id": "peft", "token_count": 498 }
239
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The implementation is based on "Parameter-Efficient Orthogonal Finetuning # via Butterfly Factorization" (https://huggingface.co/papers/2311.06243) in ICLR 2024. from __future__ import annotations import math import os import warnings from contextlib import contextmanager from typing import Any, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Function from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge _FBD_CUDA = None # this function is a 1:1 copy from accelerate @contextmanager def patch_environment(**kwargs): """ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. Will convert the values in `kwargs` to strings and upper-case all the keys. Example: ```python >>> import os >>> from accelerate.utils import patch_environment >>> with patch_environment(FOO="bar"): ... 
print(os.environ["FOO"]) # prints "bar" >>> print(os.environ["FOO"]) # raises KeyError ``` """ existing_vars = {} for key, value in kwargs.items(): key = key.upper() if key in os.environ: existing_vars[key] = os.environ[key] os.environ[key] = str(value) yield for key in kwargs: key = key.upper() if key in existing_vars: # restore previous value os.environ[key] = existing_vars[key] else: os.environ.pop(key, None) def get_fbd_cuda(): global _FBD_CUDA if _FBD_CUDA is not None: return _FBD_CUDA # This import initializes cuda context and should thus be local, see issue 1877 from torch.utils.cpp_extension import load curr_dir = os.path.dirname(__file__) # need ninja to build the extension try: with patch_environment(CC="gcc", CXX="gcc"): fbd_cuda = load( name="fbd_cuda", sources=[f"{curr_dir}/fbd/fbd_cuda.cpp", f"{curr_dir}/fbd/fbd_cuda_kernel.cu"], verbose=True, # build_directory='/tmp/' # for debugging ) # extra_cuda_cflags = ['-std=c++14', '-ccbin=$$(which gcc-7)']) # cuda10.2 is not compatible with gcc9. Specify gcc 7 except Exception as e: warnings.warn(f"Failed to load the CUDA extension: {e}, check if ninja is available.") warnings.warn("Setting boft_n_butterfly_factor to 1 to speed up the finetuning process.") fbd_cuda = None _FBD_CUDA = fbd_cuda return _FBD_CUDA class FastBlockDiag(Function): """ Implements a custom autograd Function for a fast block diagonal operation using CUDA. This function is optimized for 4D tensors where the last two dimensions are equal, representing block diagonal matrices for efficient computation on CUDA devices. """ @staticmethod def forward(ctx, input): """ The forward method for FastBlockDiag. Computes the block diagonal operation on the input tensor using a CUDA-optimized function. This method assumes that the input is a 4D tensor where the last two dimensions are equal, which represent the blocks to be diagonalized. Parameters: ctx: A context object that can be used to stash information for backward computation. 
input (Tensor): The input tensor of shape (N, D, H, H), where `N` is the batch size, `D` represents one additional dimension (In BOFT, the number of BOFT blocks), and `H` is the size of the square blocks along the last two dimensions (In BOFT, the block size). Returns: Tensor: The resulting tensor after applying the block diagonal operation, will have the shape (N, DxH, DxH). """ output = get_fbd_cuda().forward(input)[0] ctx.save_for_backward(input) return output @staticmethod def backward(ctx, grad_output): (input,) = ctx.saved_tensors grad_input = get_fbd_cuda().backward(grad_output, input)[0] return grad_input class MultiplicativeDropoutLayer(nn.Module): """ Implements the multiplicative dropout layer for BOFT. """ def __init__(self, p=0.0): """ Initializes the multiplicative dropout layer. Parameters: p (float): The probability of dropping out a block. Defaults to 0.0. """ super().__init__() self.p = p def forward(self, x): """ Applies multiplicative dropout to the input tensor. Parameters: x (Tensor): The input tensor of shape (N, D, H, H), where `N` is the batch size, `D` represents one additional dimension (In BOFT, the number of BOFT blocks), and `H` is the size of the square blocks along the last two dimensions (In BOFT, the block size). 
""" if self.training: # Ensure the last two dimensions are the same if x.shape[-1] != x.shape[-2]: raise ValueError("The last two dimensions of input should be the same!") N, D, H, _ = x.shape # Randomly select one from N n_random = torch.randint(0, N, (1,)).item() # Create a mask with 1s for matrices to be replaced with identity and 0s otherwise num_to_replace = int(self.p * D) num_zeros = D - num_to_replace # Generate a flat tensor with desired number of 1s and 0s mask = torch.cat([torch.ones(num_to_replace, device=x.device), torch.zeros(num_zeros, device=x.device)]) # Shuffle and reshape the mask mask = mask[torch.randperm(D)].view(1, D, 1, 1) full_mask = torch.zeros(N, D, 1, 1, device=x.device) full_mask[n_random] = mask # Use the mask to combine original matrices and identity matrices eye_matrix = torch.eye(H, device=x.device).repeat(N, D, 1, 1) x = (1 - full_mask) * x + full_mask * eye_matrix return x class BOFTLayer(BaseTunerLayer): """ Implements the BOFT layer. """ # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("boft_R", "boft_s") # All names of other parameters that may contain adapter-related parameters other_param_names = ("boft_block_size", "boft_block_num", "boft_dropout") def __init__(self, base_layer: nn.Module, **kwargs) -> None: """ Initializes the BOFT layer. Note, currently only support linear layer and convolutional layer, with further support for other layers to be added soon. 
Parameters: base_layer: the pretrained model layer """ self.base_layer = base_layer self.boft_block_size = {} self.boft_block_num = {} self.boft_dropout = nn.ModuleDict({}) self.boft_R = nn.ParameterDict({}) self.boft_s = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] # flag to enable/disable casting of input to weight dtype during forward call self.cast_input_dtype_enabled = True self.kwargs = kwargs base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def set_scale(self, adapter, scale): if adapter not in self.scaling: # Ignore the case where the adapter is not in the layer return warnings.warn("Scaling operation for BOFT not supported! Automatically set scale to 1.") def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self.boft_R.keys(): continue warnings.warn("Scaling operation for BOFT not supported! Automatically set scale to 1.") def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self.boft_R.keys(): continue warnings.warn("Unscaling operation for BOFT not supported! Keeping scale to 1.") def update_layer( self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights ): """ Update the linear layer with trainable BOFT weights. Override for other layer types. 
""" # Attempt to load the CUDA extension during model initialization if not get_fbd_cuda(): self.fbd_cuda_available = False # If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process boft_n_butterfly_factor = 1 else: self.fbd_cuda_available = True # to be consistent with the paper notation boft_n_butterfly_factor = boft_n_butterfly_factor - 1 if boft_n_butterfly_factor < 0: raise ValueError( f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number." ) # Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0. if boft_dropout > 0.0: boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout) else: boft_dropout_layer = nn.Identity() self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer})) if boft_block_size == 0 and boft_block_num != 0: if self.in_features % boft_block_num != 0: raise ValueError( f"in_features ({self.in_features}) must be divisible by boft_block_num ({boft_block_num})!" ) if boft_n_butterfly_factor != 0: if boft_n_butterfly_factor > int(math.log2(boft_block_num)): raise ValueError( f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!" ) if boft_block_num % (2**boft_n_butterfly_factor) != 0: raise ValueError( f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!" ) boft_block_size = int(self.in_features // boft_block_num) elif boft_block_size != 0 and boft_block_num == 0: if self.in_features % boft_block_size != 0: raise ValueError( f"in_features ({self.in_features}) must be divisible by boft_block_size ({boft_block_size})!" 
) if boft_n_butterfly_factor != 0: if self.in_features < (boft_block_size * (2**boft_n_butterfly_factor)): raise ValueError( f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!" ) if self.in_features % (boft_block_size * (2**boft_n_butterfly_factor)) != 0: raise ValueError( f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!" ) boft_block_num = int(self.in_features // boft_block_size) else: raise ValueError( "Something went wrong, please report this error: https://github.com/huggingface/peft/issues" ) # In OFT you can specify the number of blocks to be 1 if boft_n_butterfly_factor != 0: if boft_block_num % 2 != 0: raise ValueError(f"boft_block_num ({boft_block_num}) must be an even number!") if boft_block_size % 2 != 0: raise ValueError(f"boft_block_size ({boft_block_size}) must be an even number!") # If there is no butterfly factor, then permutation matrix P will be an identity matrix. 
        # Build one permutation matrix per butterfly level. When the butterfly factor is 0,
        # the single P[0] is an identity permutation (block_butterfly_perm returns arange).
        P = torch.empty((boft_n_butterfly_factor + 1, self.in_features, self.in_features))
        for i in range(boft_n_butterfly_factor + 1):
            perm = self.block_butterfly_perm(
                self.in_features, int(boft_block_num / (2 ** (i))), int(boft_block_size / 2), boft_n_butterfly_factor
            )
            perm_mat = self.perm2mat(perm)
            P[i] = perm_mat

        # Non-persistent: boft_P is deterministic given the config, so it is excluded from the state dict.
        self.register_buffer("boft_P", P, persistent=False)

        # R holds one skew-parametrized block-diagonal rotation per butterfly level.
        self.boft_R[adapter_name] = nn.Parameter(
            torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
        )
        # Per-output-channel scaling vector, initialized to ones (identity scaling).
        self.boft_s[adapter_name] = nn.Parameter(torch.ones(int(self.out_features), 1))

        self.reset_boft_parameters(adapter_name, init_weights)

        # set the boft block size and number
        self.boft_block_size[adapter_name] = boft_block_size
        self.boft_block_num[adapter_name] = boft_block_num

        self._move_adapter_to_device_of_base_layer(adapter_name)
        self.set_adapter(self.active_adapters)

    def reset_boft_parameters(self, adapter_name, init_weights):
        """
        Reset the BOFT parameters.

        `init_weights=True` gives an identity transform (R zeroed so Cayley yields I, s ones);
        `init_weights=False` draws small random perturbations instead.
        """
        if init_weights is False:
            nn.init.normal_(self.boft_R[adapter_name], mean=0.0, std=0.1)
            nn.init.normal_(self.boft_s[adapter_name], mean=1.0, std=0.1)
            return

        if adapter_name in self.boft_R.keys():
            if init_weights is True:
                # initialize R to zero
                nn.init.zeros_(self.boft_R[adapter_name])
                nn.init.ones_(self.boft_s[adapter_name])
            else:
                raise ValueError(f"Unknown initialization {init_weights=}")

    def perm2mat(self, indices):
        """
        Convert permutation indices to permutation matrix.

        Args:
            indices: A list of indices representing the permutation.
        """
        # Number of indices determines the size of the square matrix
        n = len(indices)

        # Initialize a matrix of zeros
        perm_mat = torch.zeros((n, n))

        # Set the 1s according to the indices
        for i, idx in enumerate(indices):
            perm_mat[i, idx] = 1

        return perm_mat

    def block_butterfly_perm(self, n, b, r=3, n_butterfly_factor=1):
        """
        Define the permutation matrix for the block butterfly permutation.

        Args:
            n: size of the permutation matrix
            b: desired number of blocks after multiplying with the permutation matrix
            r: base block size of the block diagonal matrix, e.g. 2x2, 3x3, 5x5 etc.
        """
        if n_butterfly_factor == 0:
            # No butterfly: identity permutation.
            return torch.arange(n)

        if b * r * 2 > n:
            raise ValueError("Invalid number of blocks!")

        block_size = int(n // b)
        indices = torch.arange(n)

        def sort_block(b, r):
            # Interleave even- and odd-positioned sub-blocks of size r within a block of size b.
            step = b / r
            initial_order = torch.arange(b)
            sorted_order = torch.empty(b, dtype=torch.long)

            evens = torch.arange(0, step, 2)
            odds = torch.arange(1, step, 2)
            sorted_seq = torch.cat((evens, odds), dim=0)
            for i, pos in enumerate(sorted_seq):
                sorted_order[int(i * r) : int(i * r + r)] = initial_order[int(pos * r) : int(pos * r + r)]
            return sorted_order

        sorted_order = sort_block(block_size, r)

        # Apply the same intra-block shuffle to every block of the full index vector.
        for i in range(0, n, block_size):
            block_end = i + block_size
            tmp_indices = indices[i:block_end]
            indices[i:block_end] = tmp_indices[sorted_order]
        return indices

    def cayley_batch(self, data):
        """
        Perform the Cayley parametrization on a batch of skew-symmetric matrices.

        Args:
            data: A batch of skew-symmetric matrices of shape (b, r, c).
        """
        b, r, c = data.shape
        # Ensure the input matrix is skew-symmetric
        skew_mat = 0.5 * (data - data.transpose(1, 2))
        id_mat = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c)

        # Perform the Cayley parametrization
        # Q = (I - A)(I + A)^{-1}, solved from the right (left=False), giving an orthogonal matrix.
        Q = torch.linalg.solve(id_mat + skew_mat, id_mat - skew_mat, left=False)

        return Q


class Linear(nn.Module, BOFTLayer):
    """
    BOFT implemented in a dense layer.
    """

    def __init__(
        self,
        base_layer,
        adapter_name: str,
        boft_block_size: int = 8,
        boft_block_num: int = 0,
        boft_n_butterfly_factor: int = 0,
        boft_dropout: float = 0.1,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        init_weights: Union[bool, str] = True,
        is_target_conv_1d_layer: bool = False,
        **kwargs,
    ) -> None:
        super().__init__()
        BOFTLayer.__init__(self, base_layer, **kwargs)
        self.fan_in_fan_out = fan_in_fan_out
        self._active_adapter = adapter_name

        self.update_layer(
            adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
        )
        self.is_target_conv_1d_layer = is_target_conv_1d_layer

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights into the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If True, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`List[str]`, *optional*):
                The list of adapter names that should be merged. If None, all active adapters will be merged.
                Defaults to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        for active_adapter in adapter_names:
            if active_adapter in self.boft_R.keys():
                base_layer = self.get_base_layer()
                orig_dtype = base_layer.weight.dtype
                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weight = base_layer.weight.data.clone()
                    butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
                    # BOFT merges multiplicatively: W' = s * (R @ W^T)^T rather than W + delta.
                    orig_weight = torch.transpose(orig_weight, 0, 1)
                    orig_weight = torch.mm(butterfly_oft_mat, orig_weight.to(butterfly_oft_mat.dtype))
                    orig_weight = torch.transpose(orig_weight, 0, 1)
                    orig_weight = orig_weight * boft_s

                    if not torch.isfinite(orig_weight).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )

                    self.base_layer.weight.data = orig_weight.contiguous().to(orig_dtype)
                else:
                    butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)

                    orig_weight = base_layer.weight.data.clone()
                    orig_weight = torch.transpose(orig_weight, 0, 1)
                    orig_weight = torch.mm(butterfly_oft_mat, orig_weight.to(butterfly_oft_mat.dtype))
                    orig_weight = torch.transpose(orig_weight, 0, 1)
                    orig_weight = orig_weight * boft_s

                    self.base_layer.weight.data = orig_weight.contiguous().to(orig_dtype)

                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """
        This method unmerges all merged adapter layers from the base weights.
        """
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return
        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            base_layer = self.get_base_layer()
            orig_dtype = base_layer.weight.dtype
            if active_adapter in self.boft_R.keys():
                butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)

                # Inverse of an orthogonal rotation is its transpose; scaling is undone by 1/s.
                orig_weight = base_layer.weight.data.clone()
                orig_weight = torch.transpose(orig_weight, 0, 1)
                orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight.to(butterfly_oft_mat.dtype))
                orig_weight = torch.transpose(orig_weight, 0, 1)

                base_layer.weight.data = (orig_weight * (1 / boft_s)).to(orig_dtype)

    def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Compute the delta weight for the given adapter.

        Returns the composed butterfly rotation matrix and the per-channel scale
        (multiplicative factors, not an additive delta).

        Args:
            adapter (str):
                The name of the adapter for which the delta weight should be computed.
        """
        boft_R = self.boft_R[adapter]
        boft_s = self.boft_s[adapter]

        N, D, H, _ = boft_R.shape
        boft_R = boft_R.view(N * D, H, H)
        orth_rotate_butterfly = self.cayley_batch(boft_R)
        orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
        if self.fbd_cuda_available:
            block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
        else:
            # CPU fallback: torch.block_diag over the unbound blocks (single butterfly level only).
            orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
            block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
            block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)

        boft_P = self.boft_P.to(block_diagonal_butterfly.device)
        # Conjugate each block-diagonal rotation by its permutation: P @ B @ P^T.
        butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
        butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
        butterfly_oft_mat = butterfly_oft_mat_batch[0]

        # Compose the butterfly levels by left-multiplication.
        for i in range(1, butterfly_oft_mat_batch.shape[0]):
            butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat

        return butterfly_oft_mat, boft_s

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        previous_dtype = x.dtype

        if self.disable_adapters:
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            result = self.base_layer(x, *args, **kwargs)
        else:
            # Accumulate all active adapters into one rotation and one scale,
            # then apply them to the base weight in a single pass.
            boft_rotation = torch.eye(self.in_features, device=x.device, dtype=previous_dtype)
            boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=previous_dtype)

            for active_adapter in self.active_adapters:
                if active_adapter not in self.boft_R.keys():
                    continue
                boft_R = self.boft_R[active_adapter]
                boft_s = self.boft_s[active_adapter]
                dropout = self.boft_dropout[active_adapter]

                N, D, H, _ = boft_R.shape
                boft_R = boft_R.view(N * D, H, H)
                orth_rotate_butterfly = self.cayley_batch(boft_R)
                orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
                orth_rotate_butterfly = dropout(orth_rotate_butterfly)
                if self.fbd_cuda_available:
                    block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
                else:
                    orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
                    block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
                    block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)

                # The BOFT author's cayley_batch, dropout and FastBlockDiag ONLY return fp32 outputs.
                boft_P = self.boft_P.to(x)
                block_diagonal_butterfly = block_diagonal_butterfly.to(x)
                butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
                butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
                butterfly_oft_mat = butterfly_oft_mat_batch[0]

                for i in range(1, butterfly_oft_mat_batch.shape[0]):
                    butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat

                boft_rotation = butterfly_oft_mat @ boft_rotation
                boft_scale = boft_s * boft_scale

            x = x.to(self.get_base_layer().weight.data.dtype)

            orig_weight = self.get_base_layer().weight.data
            orig_weight = torch.transpose(orig_weight, 0, 1)
            boft_rotation = boft_rotation.to(previous_dtype)
            orig_weight = orig_weight.to(previous_dtype)
            rotated_weight = torch.mm(boft_rotation, orig_weight)
            rotated_weight = torch.transpose(rotated_weight, 0, 1)

            scaled_rotated_weight = rotated_weight * boft_scale

            scaled_rotated_weight = scaled_rotated_weight.to(previous_dtype)
            if self.base_layer.bias is not None:
                # NOTE(review): assigns a plain Tensor over what is presumably an nn.Parameter
                # attribute — works for inference but looks fragile; confirm intended.
                self.base_layer.bias = self.base_layer.bias.to(previous_dtype)
            result = F.linear(input=x, weight=scaled_rotated_weight, bias=self.base_layer.bias)

        result = result.to(previous_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "boft." + rep


class Conv2d(nn.Module, BOFTLayer):
    """
    BOFT implemented in a Conv2d layer.
""" def __init__( self, base_layer: nn.Module, adapter_name: str, boft_block_size: int = 8, boft_block_num: int = 0, boft_n_butterfly_factor: int = 0, boft_dropout: float = 0.1, init_weights: Union[bool, str] = True, **kwargs, ) -> None: super().__init__() BOFTLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer( adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights ) def update_layer( self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights ): """ Update the conv2d layer with trainable BOFT weights. """ # Attempt to load the CUDA extension during model initialization if not get_fbd_cuda(): self.fbd_cuda_available = False # If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process boft_n_butterfly_factor = 1 else: self.fbd_cuda_available = True # to be consistent with the paper notation boft_n_butterfly_factor = boft_n_butterfly_factor - 1 if boft_n_butterfly_factor < 0: raise ValueError( f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number." ) # Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0. if boft_dropout > 0.0: boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout) else: boft_dropout_layer = nn.Identity() self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer})) # layer information from the base layer base_layer = self.get_base_layer() conv_filter_dim = self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0] # Initialize the BOFT parameters. if boft_block_size == 0 and boft_block_num != 0: if conv_filter_dim % boft_block_num != 0: raise ValueError( f"Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_num ({boft_block_num})!" 
) if boft_n_butterfly_factor != 0: if boft_n_butterfly_factor > int(math.log2(boft_block_num)): raise ValueError( f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!" ) if boft_block_num % (2**boft_n_butterfly_factor) != 0: raise ValueError( f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!" ) boft_block_size = int(conv_filter_dim // boft_block_num) elif boft_block_size != 0 and boft_block_num == 0: if conv_filter_dim % boft_block_size != 0: raise ValueError( f"Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_size ({boft_block_size})!" ) if boft_n_butterfly_factor != 0: if conv_filter_dim < (boft_block_size * (2**boft_n_butterfly_factor)): raise ValueError( f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!" ) if conv_filter_dim % (boft_block_size * (2**boft_n_butterfly_factor)) != 0: raise ValueError( f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!" ) boft_block_num = int(conv_filter_dim // boft_block_size) else: raise ValueError( "Something went wrong, please report this error: https://github.com/huggingface/peft/issues" ) # In OFT you can specify the number of blocks to be 1 if boft_n_butterfly_factor != 0: if boft_block_num % 2 != 0: raise ValueError(f"boft_block_num ({boft_block_num}) must be an even number!") if boft_block_size % 2 != 0: raise ValueError(f"boft_block_size ({boft_block_size}) must be an even number!") # If there is no butterfly factor, then permutation matrix P will be an identity matrix. 
P = torch.empty((boft_n_butterfly_factor + 1, conv_filter_dim, conv_filter_dim)) for i in range(boft_n_butterfly_factor + 1): perm = self.block_butterfly_perm( conv_filter_dim, int(boft_block_num / (2 ** (i))), int(boft_block_size / 2), boft_n_butterfly_factor ) perm_mat = self.perm2mat(perm) P[i] = perm_mat self.register_buffer("boft_P", P, persistent=False) self.boft_R[adapter_name] = nn.Parameter( torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size) ) self.boft_s[adapter_name] = nn.Parameter(torch.ones(1, int(self.out_features))) self.reset_boft_parameters(adapter_name, init_weights) # set the boft block size and number self.boft_block_size[adapter_name] = boft_block_size self.boft_block_num[adapter_name] = boft_block_num self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.boft_R.keys(): base_layer = self.get_base_layer() orig_dtype = base_layer.weight.dtype if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. 
orig_weight = base_layer.weight.data.clone() butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter) orig_weight = orig_weight.view( self.out_features, self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0] ) orig_weight = torch.transpose(orig_weight, 0, 1) orig_weight = torch.mm(butterfly_oft_mat, orig_weight.to(butterfly_oft_mat.dtype)) orig_weight = torch.transpose(orig_weight, 0, 1) orig_weight = orig_weight * boft_s orig_weight = orig_weight.view( self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0] ) self.base_layer.weight.data = orig_weight.contiguous().to(orig_dtype) else: butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter) orig_weight = base_layer.weight.data.clone() orig_weight = orig_weight.view( self.out_features, self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0] ) orig_weight = torch.transpose(orig_weight, 0, 1) orig_weight = torch.mm(butterfly_oft_mat, orig_weight.to(butterfly_oft_mat.dtype)) orig_weight = torch.transpose(orig_weight, 0, 1) orig_weight = orig_weight * boft_s orig_weight = orig_weight.view( self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0] ) self.base_layer.weight.data = orig_weight.contiguous().to(orig_dtype) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() base_layer = self.get_base_layer() orig_dtype = base_layer.weight.dtype if active_adapter in self.boft_R.keys(): butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter) orig_weight = base_layer.weight.data.clone() orig_weight = orig_weight.view( self.out_features, self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], ) orig_weight = torch.transpose(orig_weight, 0, 1) orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight.to(butterfly_oft_mat.dtype)) orig_weight = torch.transpose(orig_weight, 0, 1) orig_weight = orig_weight * (1 / boft_s) orig_weight = orig_weight.view( self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0], ) base_layer.weight.data = orig_weight.to(orig_dtype) def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. 
""" boft_R = self.boft_R[adapter] boft_s = self.boft_s[adapter].transpose(0, 1) N, D, H, _ = boft_R.shape boft_R = boft_R.view(N * D, H, H) orth_rotate_butterfly = self.cayley_batch(boft_R) orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H) if self.fbd_cuda_available: block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly) else: orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0) block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly)) block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0) boft_P = self.boft_P.to(block_diagonal_butterfly.device) butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1)) butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch) butterfly_oft_mat = butterfly_oft_mat_batch[0] for i in range(1, butterfly_oft_mat_batch.shape[0]): butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat return butterfly_oft_mat, boft_s def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: boft_rotation = torch.eye( self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0], device=x.device, dtype=x.dtype, ) boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=x.dtype) for active_adapter in self.active_adapters: if active_adapter not in self.boft_R.keys(): continue boft_R = self.boft_R[active_adapter] boft_s = self.boft_s[active_adapter].transpose(0, 1) dropout = self.boft_dropout[active_adapter] N, D, H, _ = boft_R.shape boft_R = boft_R.view(N * D, H, H) orth_rotate_butterfly = self.cayley_batch(boft_R) orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H) orth_rotate_butterfly = dropout(orth_rotate_butterfly) if self.fbd_cuda_available: block_diagonal_butterfly = 
FastBlockDiag.apply(orth_rotate_butterfly) else: orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0) block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly)) block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0) boft_P = self.boft_P.to(x) block_diagonal_butterfly = block_diagonal_butterfly.to(x) butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1)) butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch) butterfly_oft_mat = butterfly_oft_mat_batch[0] for i in range(1, butterfly_oft_mat_batch.shape[0]): butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat boft_rotation = butterfly_oft_mat @ boft_rotation boft_scale = boft_s * boft_scale x = x.to(self.base_layer.weight.data.dtype) orig_weight = self.base_layer.weight.data orig_weight = orig_weight.view( self.out_features, self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0], ) orig_weight = torch.transpose(orig_weight, 0, 1) rotated_weight = torch.mm(boft_rotation, orig_weight) rotated_weight = torch.transpose(rotated_weight, 0, 1) scaled_rotated_weight = rotated_weight * boft_scale scaled_rotated_weight = scaled_rotated_weight.view( self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0] ) x = self._cast_input_dtype(x, scaled_rotated_weight.dtype) bias = self._cast_input_dtype(self.base_layer.bias, scaled_rotated_weight.dtype) result = F.conv2d( input=x, weight=scaled_rotated_weight, bias=bias, padding=self.base_layer.padding[0], stride=self.base_layer.stride[0], ) result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "boft." + rep
peft/src/peft/tuners/boft/layer.py/0
{ "file_path": "peft/src/peft/tuners/boft/layer.py", "repo_id": "peft", "token_count": 20142 }
240
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from typing import Any, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D

from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge


class FourierFTLayer(BaseTunerLayer):
    """Base tuner layer for FourierFT: learns a sparse Fourier spectrum whose
    inverse FFT forms the additive weight delta."""

    # All names of layers that may contain (trainable) adapter weights
    adapter_layer_names = ("fourierft_spectrum",)
    # All names of other parameters that may contain adapter-related parameters
    other_param_names = ("fourierft_n_frequency", "fourierft_scaling", "fourierft_random_loc_seed")

    def __init__(self, base_layer: nn.Module, **kwargs) -> None:
        self.base_layer = base_layer
        # Per-adapter bookkeeping, keyed by adapter name.
        self.fourierft_n_frequency = {}
        self.fourierft_scaling = {}
        self.fourierft_spectrum = nn.ParameterDict({})
        self.indices = {}
        self.fourierft_random_loc_seed = {}
        # Mark the weight as unmerged
        self._disable_adapters = False
        self.merged_adapters = []
        self.kwargs = kwargs

        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
        elif isinstance(base_layer, Conv1D):
            # ds_shape is present when the weight is partitioned by DeepSpeed ZeRO.
            self.in_features, self.out_features = (
                base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
            )
        else:
            raise ValueError(f"Unsupported layer type {type(base_layer)}")

    def update_layer(self, adapter_name, n_frequency, scaling, init_weights, random_loc_seed):
        """Register a new adapter: pick `n_frequency` random spectrum locations
        (seeded, so they are reproducible without being saved) and create the
        trainable spectrum parameter."""
        if n_frequency <= 0:
            raise ValueError(f"`n_frequency` should be a positive integer value but the value passed is {n_frequency}")
        if n_frequency > self.in_features * self.out_features:
            raise ValueError(
                f"`n_frequency` should be less than or equal to the product of the input and output dimensions "
                f"but the value passed is {n_frequency} and the product is {self.in_features * self.out_features}"
            )
        self.fourierft_n_frequency[adapter_name] = n_frequency
        self.fourierft_random_loc_seed[adapter_name] = random_loc_seed
        # A dedicated seeded generator keeps the frequency locations deterministic
        # and independent of the global RNG state.
        self.indices[adapter_name] = torch.randperm(
            self.out_features * self.in_features,
            generator=torch.Generator().manual_seed(self.fourierft_random_loc_seed[adapter_name]),
        )[:n_frequency]
        # Convert flat indices to (row, col) coordinates in the (out, in) weight grid.
        self.indices[adapter_name] = torch.stack(
            [self.indices[adapter_name] // self.in_features, self.indices[adapter_name] % self.in_features], dim=0
        )
        self.fourierft_scaling[adapter_name] = scaling
        # Actual trainable parameters
        self.fourierft_spectrum[adapter_name] = nn.Parameter(torch.randn(n_frequency), requires_grad=True)

        if init_weights:
            self.reset_fourier_parameters(adapter_name)

        self._move_adapter_to_device_of_base_layer(adapter_name)
        self.set_adapter(self.active_adapters)

    @torch.no_grad()
    def reset_fourier_parameters(self, adapter_name):
        # Zero spectrum => zero delta weight => adapter starts as an identity.
        if adapter_name in self.fourierft_spectrum.keys():
            nn.init.zeros_(self.fourierft_spectrum[adapter_name])

    def get_delta_weight(self, adapter) -> torch.Tensor:
        # careful: ifft2 does not work with float16 or bfloat16
        spectrum = self.fourierft_spectrum[adapter]
        indices = self.indices[adapter].to(spectrum.device)
        # Scatter the sparse spectrum into a dense (out, in) grid, then take the
        # real part of the 2D inverse FFT as the additive delta.
        dense_spectrum = torch.zeros(self.out_features, self.in_features, device=spectrum.device)
        dense_spectrum[indices[0, :], indices[1, :]] = spectrum.float()
        delta_weight = torch.fft.ifft2(dense_spectrum).real * self.fourierft_scaling[adapter]
        return delta_weight.to(spectrum.dtype)


class FourierFTLinear(nn.Module, FourierFTLayer):
    # FourierFT implemented in a dense layer
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        n_frequency: int = 1000,
        scaling: float = 150.0,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        init_weights: Union[bool, str] = False,
        random_loc_seed: int = 777,
        **kwargs,
    ) -> None:
        super().__init__()
        FourierFTLayer.__init__(self, base_layer, **kwargs)
        self.fan_in_fan_out = fan_in_fan_out
        self._active_adapter = adapter_name
        self.update_layer(adapter_name, n_frequency, scaling, init_weights, random_loc_seed)

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights into the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If True, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`List[str]`, *optional*):
                The list of adapter names that should be merged. If None, all active adapters will be merged.
                Defaults to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        for active_adapter in adapter_names:
            if active_adapter in self.fourierft_spectrum.keys():
                base_layer = self.get_base_layer()
                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weights = base_layer.weight.data.clone()
                    orig_weights += self.get_delta_weight(active_adapter)

                    if not torch.isfinite(orig_weights).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )

                    base_layer.weight.data = orig_weights
                else:
                    base_layer.weight.data += self.get_delta_weight(active_adapter)
                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """
        This method unmerges all merged adapter layers from the base weights.
        """
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return
        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            if active_adapter in self.fourierft_spectrum.keys():
                # Additive delta, so unmerging is a simple subtraction.
                self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)

    def get_delta_weight(self, adapter) -> torch.Tensor:
        # Delegates to FourierFTLayer; kept as an explicit override for clarity.
        return super().get_delta_weight(adapter)

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        previous_dtype = x.dtype

        if self.disable_adapters:
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)
            for active_adapter in self.active_adapters:
                if active_adapter not in self.fourierft_spectrum.keys():
                    continue

                delta_w = self.get_delta_weight(active_adapter)
                x = x.to(delta_w.dtype)
                result = result + F.linear(x, delta_w)

        result = result.to(previous_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "fourierft." + rep
peft/src/peft/tuners/fourierft/layer.py/0
{ "file_path": "peft/src/peft/tuners/fourierft/layer.py", "repo_id": "peft", "token_count": 3663 }
241
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Optional, Union

from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType


@dataclass
class LoHaConfig(LycorisConfig):
    """
    This is the configuration class to store the configuration of a [`LoHaModel`].

    Args:
        r (`int`):
            LoHa rank.
        alpha (`int`):
            The alpha parameter for LoHa scaling.
        rank_dropout (`float`):
            The dropout probability for rank dimension during training.
        module_dropout (`float`):
            The dropout probability for disabling LoHa modules during training.
        use_effective_conv2d (`bool`):
            Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper).
        target_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to apply the adapter to. If this is specified, only the modules with the
            specified names will be replaced. When passing a string, a regex match will be performed. When passing a
            list of strings, either an exact match will be performed or it is checked if the name of the module ends
            with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are
            chosen, excluding the output layer. If this is not specified, modules will be chosen according to the
            model architecture. If the architecture is not known, an error will be raised -- in this case, you should
            specify the target modules manually.
        exclude_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
            When passing a list of strings, either an exact match will be performed or it is checked if the name of
            the module ends with any of the passed strings.
        init_weights (`bool`):
            Whether to perform initialization of adapter weights. This defaults to `True`, passing `False` is
            discouraged.
        layers_to_transform (`Union[List[int], int]`):
            The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer
            indices that are specified in this list. If a single integer is passed, it will apply the transformations
            on the layer at this index.
        layers_pattern (`Optional[Union[List[str], str]]`):
            The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target
            the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
        rank_pattern (`dict`):
            The mapping from layer names or regexp expression to ranks which are different from the default rank
            specified by `r`. For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`.
        alpha_pattern (`dict`):
            The mapping from layer names or regexp expression to alphas which are different from the default alpha
            specified by `alpha`. For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`.
        modules_to_save (`Optional[List[str]]`):
            List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
    """

    r: int = field(default=8, metadata={"help": "LoHa rank"})
    alpha: int = field(default=8, metadata={"help": "LoHa alpha"})
    rank_dropout: float = field(
        default=0.0, metadata={"help": "The dropout probability for rank dimension during training"}
    )
    module_dropout: float = field(
        default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"}
    )
    use_effective_conv2d: bool = field(
        default=False,
        metadata={
            "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'
        },
    )
    target_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": "List of module names or regex expression of the module names to replace with LoHa."
            "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
            "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
        },
    )
    exclude_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={"help": "List of module names or regex expression of the module names to exclude from LoHa."},
    )
    init_weights: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change "
                "this setting, except if you know exactly what you're doing."
            ),
        },
    )
    layers_to_transform: Optional[Union[list[int], int]] = field(
        default=None,
        metadata={
            "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
        },
    )
    layers_pattern: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. "
            "This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`."
        },
    )
    modules_to_save: Optional[list[str]] = field(
        default=None,
        metadata={
            "help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. "
            "For example, in Sequence Classification or Token Classification tasks, "
            "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
        },
    )

    def __post_init__(self):
        super().__post_init__()
        self.peft_type = PeftType.LOHA
        # Normalize list inputs to sets for O(1) membership checks downstream;
        # string (regex) inputs are kept as-is.
        self.target_modules = (
            set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
        )
        self.exclude_modules = (
            set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
        )

        # check for layers_to_transform and layers_pattern
        if self.layers_pattern and not self.layers_to_transform:
            raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
peft/src/peft/tuners/loha/config.py/0
{ "file_path": "peft/src/peft/tuners/loha/config.py", "repo_id": "peft", "token_count": 2674 }
242
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional

import torch

from peft.import_utils import is_gptqmodel_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import get_auto_gptq_quant_linear

from .layer import LoraVariant


class GPTQLoraLinear(torch.nn.Module, LoraLayer):
    """LoRA adapter wrapped around a GPTQ-quantized linear layer. Merging is not supported."""

    def __init__(
        self,
        base_layer,
        adapter_name: str,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        init_lora_weights: bool = True,
        use_rslora: bool = False,
        use_dora: bool = False,
        use_qalora: bool = False,
        lora_bias: bool = False,
        qalora_group_size: int = 32,
        **kwargs,
    ):
        super().__init__()
        LoraLayer.__init__(self, base_layer)

        if use_dora:
            raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")

        # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
        # for backwards compatibility
        self.quant_linear_module = base_layer
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            init_lora_weights=init_lora_weights,
            use_rslora=use_rslora,
            use_dora=use_dora,
            use_qalora=use_qalora,
            lora_bias=lora_bias,
            qalora_group_size=qalora_group_size,
        )

    def resolve_lora_variant(self, *, use_dora: bool, use_qalora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return the LoRA variant implementation for the requested flags; plain LoRA returns None."""
        if use_dora and use_qalora:
            raise NotImplementedError(
                f"Dora and QA_lora at the same time is not supported for {self.__class__.__name__} (yet)."
            )
        if use_dora:
            from .variants import DoraLinearVariant

            return DoraLinearVariant()
        if use_qalora:
            from .variants import QALoraLinearVariant

            return QALoraLinearVariant()
        return None

    def forward(self, x: torch.Tensor):
        # note: logic differs from default Linear because merging is not supported
        result = self.quant_linear_module(x)

        if self.disable_adapters:
            return result

        available = self.lora_A.keys()
        for adapter in self.active_adapters:
            if adapter not in available:
                continue
            output_dtype = result.dtype
            lora_A = self.lora_A[adapter]
            lora_B = self.lora_B[adapter]
            dropout = self.lora_dropout[adapter]
            scaling = self.scaling[adapter]

            # The (possibly cast) activations are reused by subsequent adapters as well.
            x = self._cast_input_dtype(x, lora_A.weight.dtype)

            if adapter not in self.lora_variant:  # vanilla LoRA
                result = result + lora_B(lora_A(dropout(x))) * scaling
            else:
                result = self.lora_variant[adapter].forward(
                    self,
                    active_adapter=adapter,
                    x=x,
                    result=result,
                )
            result = result.to(output_dtype)
        return result

    def __repr__(self) -> str:
        return "lora." + super().__repr__()

    # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
    # def reset_lora_parameters(self, adapter_name):
    #     if adapter_name in self.lora_A.keys():
    #         torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
    #         torch.nn.init.zeros_(self.lora_B[adapter_name].weight)


def dispatch_gptq(
    target: torch.nn.Module,
    adapter_name: str,
    **kwargs: Any,
) -> Optional[torch.nn.Module]:
    """Wrap `target` in a GPTQLoraLinear when it is a GPTQ-quantized layer; otherwise return None."""
    target_base_layer = target.get_base_layer() if isinstance(target, BaseTunerLayer) else target

    cfg = kwargs.get("gptq_quantization_config", None)

    if is_gptqmodel_available():
        from gptqmodel.nn_modules.qlinear import BaseQuantLinear

        if isinstance(target_base_layer, BaseQuantLinear):
            new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
            target.qweight = target_base_layer.qweight
            return new_module
        return None

    # Fall back to auto-gptq's quant-linear class derived from the quantization config.
    quant_linear = get_auto_gptq_quant_linear(cfg)
    if quant_linear is not None and isinstance(target_base_layer, quant_linear):
        new_module = GPTQLoraLinear(target, adapter_name, **kwargs)
        target.qweight = target_base_layer.qweight
        return new_module
    return None
peft/src/peft/tuners/lora/gptq.py/0
{ "file_path": "peft/src/peft/tuners/lora/gptq.py", "repo_id": "peft", "token_count": 2419 }
243
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Optional, Union


from peft.tuners.prompt_tuning import PromptTuningConfig
from peft.utils import PeftType


class MultitaskPromptTuningInit(str, enum.Enum):
    """Strategies for initializing the multitask prompt tuning parameters."""

    # initialize prompt with text
    TEXT = "TEXT"
    # initialize prompt with random matrix
    RANDOM = "RANDOM"
    # average the prefix and column matrices obtained during source training
    AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
    # pick prefix and column matrices for a particular task obtained during source training
    EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
    # only use the prompt embeddings trained during source training
    ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"


@dataclass
class MultitaskPromptTuningConfig(PromptTuningConfig):
    """
    Configuration class for multitask prompt tuning, extending [`PromptTuningConfig`].

    The source-based init strategies (AVERAGE_SOURCE_TASKS, EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED) rely on a state
    dict produced during source training, supplied via `prompt_tuning_init_state_dict_path`; see the individual field
    metadata below for details.
    """

    prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
        default=MultitaskPromptTuningInit.RANDOM,
        metadata={
            "help": (
                "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
                "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
            ),
        },
    )
    prompt_tuning_init_state_dict_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The path of source state dict. This is required when training the downstream target prompt from "
                "the pretrained source prompt"
            ),
        },
    )
    prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
    num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"})
    num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})

    def __post_init__(self):
        super().__post_init__()
        # Tag the config so PEFT dispatches to the multitask prompt tuning implementation.
        self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
peft/src/peft/tuners/multitask_prompt_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/multitask_prompt_tuning/config.py", "repo_id": "peft", "token_count": 899 }
244
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on implementation made available in https://github.com/ppetrushkov/peft/tree/road (not from paper authors) from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.utils import register_peft_method from .config import RoadConfig from .layer import Linear, RoadLayer from .model import RoadModel __all__ = [ "Linear", "RoadConfig", "RoadLayer", "RoadModel", ] register_peft_method(name="road", config_cls=RoadConfig, model_cls=RoadModel, is_mixed_compatible=True) def __getattr__(name): if (name == "Linear8bitLt") and is_bnb_available(): from .bnb import Linear8bitLt return Linear8bitLt if (name == "Linear4bit") and is_bnb_4bit_available(): from .bnb import Linear4bit return Linear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
peft/src/peft/tuners/road/__init__.py/0
{ "file_path": "peft/src/peft/tuners/road/__init__.py", "repo_id": "peft", "token_count": 464 }
245
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class VBLoRAConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`VBLoRAConfig`]. Paper: https://huggingface.co/papers/2405.15179 Args: r (`int`): The rank of incremental matrices. num_vectors (`int`): Number of vectors in the vector bank. Use higher values when the model size increases. vector_length (`int`): The length of the vectors in the vector bank. The length of the vectors should be divisible by the hidden dimension of the model. topk (`int`): The K value for top-K selection. A larger value of K increases the size of the saved model. In practice, setting K=2 typically provides the best performance and parameter efficiency. For more details, refer to the discussion in the paper. target_modules (`Union[List[str], str]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. 
If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. save_only_topk_weights (`bool`): Whether to only save the topk weights. Setting `save_only_topk_weights = True` significantly reduces storage space. However, models saved in this mode can be used for merging or inference only, not for resuming training. vblora_dropout (`float`): The dropout probability for VBLoRA layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for VBLoRA. Can be 'none', 'all' or 'vblora_only'. If 'all' or 'vblora_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. modules_to_save (`List[str]`): List of modules apart from VBLoRA layers to be set as trainable and saved in the final checkpoint. init_vector_bank_bound (`float`): The vector bank is initialized with a uniform distribution between -init_vector_bank_bound and init_vector_bank_bound. Avoid initializing the vector bank with all zeros to prevent zero gradients. A small value, such as 0.02, is typically effective. Initializing with a large value may cause training instability. 
init_logits_std (`float`): The logits are initialized with a normal distribution with a standard deviation of init_logits_std. Default is 0.1. layers_to_transform (`Union[List[int],int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`Optional[Union[List[str], str]]`): The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`. """ r: int = field(default=4, metadata={"help": "The rank of incremental matrices."}) num_vectors: int = field( default=256, metadata={"help": "Number of vectors in the vector bank. Use higher values when the model size increases."}, ) vector_length: int = field( default=256, metadata={ "help": "The length of the vectors in the vector bank. The length of the vectors should be divisible by " "the hidden dimension of the model." }, ) topk: int = field( default=2, metadata={ "help": "The K value for top-K selection. A larger value of K increases the size of the saved model. " "In practice, setting K=2 typically provides the best performance and parameter efficiency. " "For more details, refer to the discussion in the paper." }, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with LoRA." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." 
) }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from VBLoRA."}, ) save_only_topk_weights: bool = field( default=False, metadata={ "help": ( "Whether to only save the topk weights. Setting `save_only_topk_weights = True` significantly reduces " "storage space. However, models saved in this mode can be used for merging or inference only, not for " "resuming training." ) }, ) vblora_dropout: float = field(default=0.0, metadata={"help": "VBLoRA dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: str = field(default="none", metadata={"help": "Bias type for VBLoRA. Can be 'none', 'all' or 'vblora_only'"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": ( "List of modules apart from VBLoRA layers to be set as trainable and saved in the final checkpoint. For" " example, in Sequence Classification or Token Classification tasks, the final layer" " `classifier/score` are randomly initialized and as such need to be trainable and saved." ) }, ) init_vector_bank_bound: float = field( default=0.02, metadata={ "help": ( "The vector bank is initialized with a uniform distribution between -init_vector_bank_bound and" " init_vector_bank_bound. Avoid initializing the vector bank with all zeros to prevent zero gradients." " A small value, such as 0.02, is typically effective. Initializing with a large value may cause" " training instability." ), }, ) init_logits_std: float = field( default=0.1, metadata={ "help": ( "The logits are initialized with a normal distribution with a standard deviation of init_logits_std. " "Default value 0.1 typically works well." 
), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " "This only works when target_modules is a list of str. This should target the `nn.ModuleList` of the " "model, which is often called `'layers'` or `'h'`." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." "This only works when target_modules is a list of str." }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.VBLORA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
peft/src/peft/tuners/vblora/config.py/0
{ "file_path": "peft/src/peft/tuners/vblora/config.py", "repo_id": "peft", "token_count": 3932 }
246
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional

import torch


class IncrementalPCA:
    """
    Incremental Principal Components Analysis (IPCA) backed by PyTorch for GPU acceleration.

    Adapted from
    https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/decomposition/_incremental_pca.py

    The model can be fitted batch by batch (`partial_fit`), or on a full dataset split into
    minibatches (`fit`), after which new data can be projected onto the learned components
    with `transform`.

    Args:
        n_components (int, optional): Number of components to keep. If `None`, it is set to the minimum of the
            number of samples and features. Defaults to None.
        copy (bool): If False, input data will be overwritten. Defaults to True.
        batch_size (int, optional): Samples per batch; only needed when `fit` is called. If `None`, it is inferred
            from the data as `5 * n_features`. Defaults to None.
        svd_driver (str, optional): Name of the cuSOLVER method used by torch.linalg.svd. Only honored on CUDA
            inputs. Available options: None, gesvd, gesvdj, gesvda. Defaults to None.
        lowrank (bool, optional): Use torch.svd_lowrank instead of torch.linalg.svd, which can be faster.
            Defaults to False.
        lowrank_q (int, optional): Approximation rank for lowrank mode; defaults to `n_components * 2`.
        lowrank_niter (int, optional): Number of subspace iterations for torch.svd_lowrank. Defaults to 4.
        lowrank_seed (int, optional): Seed making torch.svd_lowrank results reproducible.
    """

    def __init__(
        self,
        n_components: Optional[int] = None,
        copy: Optional[bool] = True,
        batch_size: Optional[int] = None,
        svd_driver: Optional[str] = None,
        lowrank: bool = False,
        lowrank_q: Optional[int] = None,
        lowrank_niter: int = 4,
        lowrank_seed: Optional[int] = None,
    ):
        self.n_components = n_components
        self.copy = copy
        self.batch_size = batch_size
        self.svd_driver = svd_driver
        self.lowrank = lowrank
        self.lowrank_q = lowrank_q
        self.lowrank_niter = lowrank_niter
        self.lowrank_seed = lowrank_seed

        self.n_features_ = None

        if self.lowrank:
            self._validate_lowrank_params()

    def _validate_lowrank_params(self):
        # The low-rank approximation needs a rank q >= n_components to be meaningful.
        if self.lowrank_q is None:
            if self.n_components is None:
                raise ValueError("n_components must be specified when using lowrank mode with lowrank_q=None.")
            self.lowrank_q = self.n_components * 2
        elif self.lowrank_q < self.n_components:
            raise ValueError("lowrank_q must be greater than or equal to n_components.")

    def _svd_fn_full(self, X):
        # Thin SVD; `driver` is only honored on CUDA inputs.
        return torch.linalg.svd(X, full_matrices=False, driver=self.svd_driver)

    def _svd_fn_lowrank(self, X):
        use_seed = self.lowrank_seed is not None
        with torch.random.fork_rng(enabled=use_seed):
            if use_seed:
                torch.manual_seed(self.lowrank_seed)
            U, S, V = torch.svd_lowrank(X, q=self.lowrank_q, niter=self.lowrank_niter)
        return U, S, V.mH

    def _validate_data(self, X) -> torch.Tensor:
        """Coerce `X` to a float32/float64 tensor (cloning unless `copy=False`) and sanity-check `n_components`."""
        if not isinstance(X, torch.Tensor):
            X = torch.tensor(X, dtype=torch.float32)
        elif self.copy:
            X = X.clone()

        n_samples, n_features = X.shape
        if self.n_components is None:
            pass
        elif self.n_components > n_features:
            raise ValueError(
                f"n_components={self.n_components} invalid for n_features={n_features}, "
                "need more rows than columns for IncrementalPCA processing."
            )
        elif self.n_components > n_samples:
            raise ValueError(
                f"n_components={self.n_components} must be less or equal to the batch number of samples {n_samples}"
            )

        if X.dtype not in (torch.float32, torch.float64):
            X = X.to(torch.float32)
        return X

    @staticmethod
    def _incremental_mean_and_var(
        X, last_mean, last_variance, last_sample_count
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Fold one batch into the running per-feature mean and variance.

        Args:
            X (torch.Tensor): Batch of shape (n_samples, n_features).
            last_mean (torch.Tensor): Previous mean of shape (n_features,), or None before the first batch.
            last_variance (torch.Tensor): Previous variance of shape (n_features,), or None before the first batch.
            last_sample_count (torch.Tensor): Count of samples processed before this batch.

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Updated mean, variance, and total sample count.
        """
        if X.shape[0] == 0:
            return last_mean, last_variance, last_sample_count

        if last_sample_count > 0:
            if last_mean is None:
                raise ValueError("last_mean should not be None if last_sample_count > 0.")
            if last_variance is None:
                raise ValueError("last_variance should not be None if last_sample_count > 0.")

        new_sample_count = torch.tensor([X.shape[0]], device=X.device)
        updated_sample_count = last_sample_count + new_sample_count

        if last_mean is None:
            last_sum = torch.zeros(X.shape[1], dtype=torch.float64, device=X.device)
        else:
            last_sum = last_mean * last_sample_count
        new_sum = X.sum(dim=0, dtype=torch.float64)
        updated_mean = (last_sum + new_sum) / updated_sample_count

        # Batch variance via squared deviations from the batch mean, with a correction term
        # for numerical stability; `temp` is squared in place to avoid an extra allocation.
        T = new_sum / new_sample_count
        temp = X - T
        correction = temp.sum(dim=0, dtype=torch.float64).square()
        temp.square_()
        new_unnormalized_variance = temp.sum(dim=0, dtype=torch.float64)
        new_unnormalized_variance -= correction / new_sample_count
        if last_variance is None:
            updated_variance = new_unnormalized_variance / updated_sample_count
        else:
            last_unnormalized_variance = last_variance * last_sample_count
            last_over_new_count = last_sample_count.double() / new_sample_count
            updated_unnormalized_variance = (
                last_unnormalized_variance
                + new_unnormalized_variance
                + last_over_new_count / updated_sample_count * (last_sum / last_over_new_count - new_sum).square()
            )
            updated_variance = updated_unnormalized_variance / updated_sample_count
        return updated_mean, updated_variance, updated_sample_count

    @staticmethod
    def _svd_flip(u, v, u_based_decision=True) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Fix the signs of the singular vectors (in place) so that repeated SVD runs give deterministic output.

        Args:
            u (torch.Tensor): Left singular vectors.
            v (torch.Tensor): Right singular vectors.
            u_based_decision (bool, optional): If True, use the left singular vectors to pick the signs.
                Defaults to True.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Sign-adjusted left and right singular vectors.
        """
        if u_based_decision:
            max_abs_cols = torch.argmax(torch.abs(u), dim=0)
            signs = torch.sign(u[max_abs_cols, range(u.shape[1])])
        else:
            max_abs_rows = torch.argmax(torch.abs(v), dim=1)
            signs = torch.sign(v[range(v.shape[0]), max_abs_rows])
        u *= signs[: u.shape[1]].view(1, -1)
        v *= signs.view(-1, 1)
        return u, v

    def fit(self, X, check_input=True):
        """
        Fit the model on `X` by iterating over minibatches of `batch_size` samples.

        Args:
            X (torch.Tensor): Input of shape (n_samples, n_features).
            check_input (bool, optional): If True, validate the input. Defaults to True.

        Returns:
            IncrementalPCA: The fitted model.
        """
        if check_input:
            X = self._validate_data(X)
        n_samples, n_features = X.shape
        if self.batch_size is None:
            self.batch_size = 5 * n_features

        for batch in self.gen_batches(n_samples, self.batch_size, min_batch_size=self.n_components or 0):
            self.partial_fit(X[batch], check_input=False)
        return self

    def partial_fit(self, X, check_input=True):
        """
        Update the model with one batch `X` of shape (n_samples, n_features).

        Args:
            X (torch.Tensor): Batch input.
            check_input (bool, optional): If True, validate the input. Defaults to True.

        Returns:
            IncrementalPCA: The updated model.
        """
        first_pass = not hasattr(self, "components_")
        if check_input:
            X = self._validate_data(X)
        n_samples, n_features = X.shape

        if first_pass:
            # Lazily initialize the running statistics on the first batch so that
            # device and feature count follow the data.
            self.mean_ = None
            self.var_ = None
            self.n_samples_seen_ = torch.tensor([0], device=X.device)
            self.n_features_ = n_features
            if not self.n_components:
                self.n_components = min(n_samples, n_features)

        if n_features != self.n_features_:
            raise ValueError(
                "Number of features of the new batch does not match the number of features of the first batch."
            )

        col_mean, col_var, n_total_samples = self._incremental_mean_and_var(
            X, self.mean_, self.var_, self.n_samples_seen_
        )

        if first_pass:
            X -= col_mean
        else:
            col_batch_mean = torch.mean(X, dim=0)
            X -= col_batch_mean
            # Stack the previous components (scaled by their singular values), the centered batch,
            # and a mean-correction row so that a single SVD updates the whole decomposition.
            mean_correction_factor = torch.sqrt((self.n_samples_seen_.double() / n_total_samples) * n_samples)
            mean_correction = mean_correction_factor * (self.mean_ - col_batch_mean)
            X = torch.vstack(
                (
                    self.singular_values_.view((-1, 1)) * self.components_,
                    X,
                    mean_correction,
                )
            )

        U, S, Vt = self._svd_fn_lowrank(X) if self.lowrank else self._svd_fn_full(X)
        U, Vt = self._svd_flip(U, Vt, u_based_decision=False)
        explained_variance = S**2 / (n_total_samples - 1)
        explained_variance_ratio = S**2 / torch.sum(col_var * n_total_samples)

        self.n_samples_seen_ = n_total_samples
        self.components_ = Vt[: self.n_components]
        self.singular_values_ = S[: self.n_components]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[: self.n_components]
        self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components]
        if self.n_components not in (n_samples, n_features):
            self.noise_variance_ = explained_variance[self.n_components :].mean()
        else:
            self.noise_variance_ = torch.tensor(0.0, device=X.device)
        return self

    def transform(self, X) -> torch.Tensor:
        """
        Project `X` onto the principal components learned during fitting.

        Args:
            X (torch.Tensor): New data of shape (n_samples, n_features).

        Returns:
            torch.Tensor: Transformed data of shape (n_samples, n_components).
        """
        # NOTE(review): torch.mm requires matching dtypes; when `components_` are float32 while
        # `mean_` (and thus the centered input) is float64 this may raise — confirm with callers.
        centered = X - self.mean_
        return torch.mm(centered.double(), self.components_.T).to(centered.dtype)

    @staticmethod
    def gen_batches(n: int, batch_size: int, min_batch_size: int = 0):
        """
        Yield slices of `batch_size` elements covering range(0, n); the final slice may be shorter.

        A full batch is skipped (and merged into the final slice) when fewer than `min_batch_size`
        elements would remain after it.

        Args:
            n (int): Size of the sequence.
            batch_size (int): Number of elements per batch.
            min_batch_size (int, optional): Minimum number of elements per batch. Defaults to 0.

        Yields:
            slice: A slice of `batch_size` elements.
        """
        start = 0
        for _ in range(int(n // batch_size)):
            end = start + batch_size
            if end + min_batch_size > n:
                continue
            yield slice(start, end)
            start = end
        if start < n:
            yield slice(start, n)
{ "file_path": "peft/src/peft/utils/incremental_pca.py", "repo_id": "peft", "token_count": 6134 }
247
import torch

from peft.tuners._buffer_dict import BufferDict


class TestBufferDict:
    """Tests for BufferDict: construction and the supported `update` input forms."""

    def test_init_from_dict_works(self):
        # Fix: previously this test only constructed the BufferDict and asserted
        # nothing, so a silently broken __init__ would still pass. Verify that
        # the buffer is actually registered and its value is preserved.
        tensor = torch.randn(10, 2)
        bd = BufferDict({"default": tensor})
        assert set(bd.keys()) == {"default"}
        assert torch.allclose(bd["default"], tensor)

    def test_update_from_other_bufferdict(self):
        # update() with another BufferDict should merge its buffers in.
        default_tensor = torch.randn(10, 2)
        non_default_tensor = torch.randn(10, 2)
        bd1 = BufferDict({"default": default_tensor})
        bd2 = BufferDict({"non_default": non_default_tensor})
        bd1.update(bd2)

        assert set(bd1.keys()) == {"default", "non_default"}
        assert torch.allclose(bd1["default"], default_tensor)
        assert torch.allclose(bd1["non_default"], non_default_tensor)

    def test_update_from_dict(self):
        # update() with a plain dict should behave the same as with a BufferDict.
        default_tensor = torch.randn(10, 2)
        non_default_tensor = torch.randn(10, 2)
        bd1 = BufferDict({"default": default_tensor})
        d1 = {"non_default": non_default_tensor}
        bd1.update(d1)

        assert set(bd1.keys()) == {"default", "non_default"}
        assert torch.allclose(bd1["default"], default_tensor)
        assert torch.allclose(bd1["non_default"], non_default_tensor)

    def test_update_from_dict_items(self):
        # update() must also accept an iterable of (key, value) pairs.
        default_tensor = torch.randn(10, 2)
        non_default_tensor = torch.randn(10, 2)
        bd1 = BufferDict({"default": default_tensor})
        d1 = {"non_default": non_default_tensor}
        bd1.update(d1.items())

        assert set(bd1.keys()) == {"default", "non_default"}
        assert torch.allclose(bd1["default"], default_tensor)
        assert torch.allclose(bd1["non_default"], non_default_tensor)
peft/tests/test_bufferdict.py/0
{ "file_path": "peft/tests/test_bufferdict.py", "repo_id": "peft", "token_count": 740 }
248
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn

from peft import LoraConfig, get_peft_model
from peft.tuners.lora.layer import Conv1d as LoraConv1d
from peft.tuners.lora.layer import Conv2d as LoraConv2d
from peft.tuners.lora.layer import Embedding as LoraEmbedding
from peft.tuners.lora.layer import Linear as LoraLinear
from peft.tuners.lora.variants import (
    DoraConv1dVariant,
    DoraConv2dVariant,
    DoraEmbeddingVariant,
    DoraLinearVariant,
)


class CustomModel(nn.Module):
    """pytorch module that contains common targetable layers (linear, embedding, conv, ...)"""

    def __init__(self, num_embeddings=100, embedding_dim=16, num_classes=10):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings, embedding_dim)
        self.conv1d = nn.Conv1d(in_channels=embedding_dim, out_channels=32, kernel_size=3, padding=1)
        self.conv2d = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.flatten = nn.Flatten()
        # Flattened sizes assume seq_len == 10 and image H == W == 10, matching the
        # dummy inputs used in the tests below.
        self.dummy_conv1d_output_dim = 32 * 10
        self.dummy_conv2d_output_dim = 16 * 10 * 10
        self.linear1 = nn.Linear(self.dummy_conv1d_output_dim + self.dummy_conv2d_output_dim, 64)
        self.linear2 = nn.Linear(64, num_classes)
        self.relu = nn.ReLU()

    def forward(self, input_ids, dummy_image_input):
        # Path 1: Embedding -> Conv1d
        x1 = self.embedding(input_ids)  # (batch_size, seq_len, embedding_dim)
        x1 = x1.transpose(1, 2)  # (batch_size, embedding_dim, seq_len)
        x1 = self.relu(self.conv1d(x1))  # (batch_size, 32, seq_len)
        x1_flat = self.flatten(x1)

        # Path 2: Conv2d -> Linear
        x2 = self.relu(self.conv2d(dummy_image_input))  # (batch_size, 16, H, W)
        x2_flat = self.flatten(x2)  # (batch_size, 16*H*W)

        # Combine or select paths if making a functional model.
        # For this test, we mainly care about layer types, so forward might not be fully executed.
        # Let's use x2_flat for subsequent linear layers.
        output = self.relu(self.linear1(torch.concat([x1_flat, x2_flat], dim=1)))
        output = self.linear2(output)
        return output


# Maps variant name -> {LoRA layer type -> expected variant implementation class}.
# Layer types that are absent from a variant's mapping are treated as unsupported.
VARIANT_MAP = {
    "dora": {
        LoraLinear: DoraLinearVariant,
        LoraEmbedding: DoraEmbeddingVariant,
        LoraConv1d: DoraConv1dVariant,
        LoraConv2d: DoraConv2dVariant,
    }
}


# Each case: (variant name, config class, config kwargs passed to that class).
TEST_CASES = [
    (
        "dora",
        LoraConfig,
        {"target_modules": ["linear1", "linear2", "conv1d", "conv2d", "embedding"], "use_dora": True},
    ),
]


class TestLoraVariants:
    @pytest.mark.parametrize("variant_name, config_cls, config_kwargs", TEST_CASES)
    def test_variant_is_applied_to_layers(self, variant_name, config_cls, config_kwargs):
        # This test assumes that targeting and replacing layers works and that after `get_peft_model` we
        # have a model with LoRA layers. We just make sure that each LoRA layer has its variant set and
        # it is also the correct variant for that layer.
        base_model = CustomModel()
        peft_config = config_cls(**config_kwargs)
        peft_model = get_peft_model(base_model, peft_config)

        layer_type_map = VARIANT_MAP[variant_name]

        for _, module in peft_model.named_modules():
            if not hasattr(module, "lora_variant"):
                continue

            # Note that not every variant supports every layer. If it is not mapped it is deemed unsupported and
            # will not be tested.
            expected_variant_type = layer_type_map.get(type(module), None)
            if not expected_variant_type:
                continue

            assert isinstance(module.lora_variant["default"], expected_variant_type)

    def custom_model_with_loss_backpropagated(self, peft_config):
        """Returns the CustomModel + PEFT model instance with a dummy loss that was backpropagated once."""
        base_model = CustomModel()
        peft_model = get_peft_model(base_model, peft_config)
        # Dummy inputs: token ids of shape (10, 10) and a single-channel 10x10 "image"
        # batch, matching the flattened sizes hard-coded in CustomModel.__init__.
        x, y = torch.ones(10, 10).long(), torch.ones(10, 1, 10, 10)
        out = peft_model(x, y)
        loss = out.sum()
        loss.backward()
        return base_model, peft_model

    def test_dora_params_have_gradients(self):
        """Ensure that the parameters added by the DoRA variant are participating in the output computation."""
        layer_names = ["linear1", "linear2", "conv1d", "conv2d", "embedding"]
        peft_config = LoraConfig(target_modules=layer_names, use_dora=True)
        base_model, peft_model = self.custom_model_with_loss_backpropagated(peft_config)

        for layer in layer_names:
            # A non-None grad on the magnitude vector proves the DoRA parameters took
            # part in the forward pass that produced the backpropagated loss.
            assert getattr(peft_model.base_model.model, layer).lora_magnitude_vector["default"].weight.grad is not None
peft/tests/test_lora_variants.py/0
{ "file_path": "peft/tests/test_lora_variants.py", "repo_id": "peft", "token_count": 2196 }
249
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import unittest from copy import deepcopy import pytest import torch from diffusers import StableDiffusionPipeline from parameterized import parameterized from torch import nn from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, BitsAndBytesConfig, ) from transformers.pytorch_utils import Conv1D from peft import ( AdaptionPromptConfig, IA3Config, LoHaConfig, LoraConfig, PeftModel, PromptTuningConfig, VeraConfig, get_layer_status, get_model_status, get_peft_model, ) from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import ( BaseTuner, BaseTunerLayer, _maybe_include_all_linear_layers, check_target_module_exists, inspect_matched_modules, ) from peft.tuners.tuners_utils import ( _find_minimal_target_modules as find_minimal_target_modules, ) from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND, ModulesToSaveWrapper, infer_device from peft.utils.constants import DUMMY_MODEL_CONFIG, MIN_TARGET_MODULES_FOR_OPTIMIZATION from .testing_utils import hub_online_once, require_bitsandbytes, require_non_cpu # Implements tests for regex matching logic common for all BaseTuner subclasses, and # tests for correct behaviour with different config kwargs for BaseTuners (Ex: feedforward for IA3, etc) and # tests for utility function to include all linear layers 
REGEX_TEST_CASES = [ # tuple of # 1. key # 2. target_modules # 3. layers_to_transform # 4. layers_pattern # 5. expected result # some basic examples ("", [], None, None, False), ("", ["foo"], None, None, False), ("foo", [], None, None, False), ("foo", ["foo"], None, None, True), ("foo", ["bar"], None, None, False), ("foo", ["foo", "bar"], None, None, True), # with regex ("foo", "foo", None, None, True), ("foo", ".*oo", None, None, True), ("foo", "fo.*", None, None, True), ("foo", ".*bar.*", None, None, False), ("foobar", ".*oba.*", None, None, True), # with layers_to_transform ("foo.bar.1.baz", ["baz"], [1], ["bar"], True), ("foo.bar.1.baz", ["baz"], [0], ["bar"], False), ("foo.bar.1.baz", ["baz"], [2], ["bar"], False), ("foo.bar.10.baz", ["baz"], [0], ["bar"], False), ("foo.bar.10.baz", ["baz"], [1], ["bar"], False), ("foo.bar.1.baz", ["baz"], [0, 1, 2], ["bar"], True), ("foo.bar.1.baz", ["baz", "spam"], [1], ["bar"], True), ("foo.bar.1.baz", ["baz", "spam"], [0, 1, 2], ["bar"], True), # empty layers_pattern ("foo.whatever.1.baz", ["baz"], [1], [], True), ("foo.whatever.1.baz", ["baz"], [0], [], False), ("foo.whatever.1.baz", ["baz"], [1], "", True), ("foo.whatever.1.baz", ["baz"], [0], "", False), ("foo.whatever.1.baz", ["baz"], [1], None, True), ("foo.whatever.1.baz", ["baz"], [0], None, False), # some realistic examples: transformers model ("transformer.h.1.attn.attention.q_proj.foo", ["q_proj"], None, [], False), ("transformer.h.1.attn.attention.q_proj", [], None, [], False), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], None, [], True), ("transformer.h.1.attn.attention.q_proj", ["q_proj", "v_proj"], None, [], True), ("transformer.h.1.attn.attention.resid_dropout", ["q_proj", "v_proj"], None, [], False), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [1], ["h"], True), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [0], ["h"], False), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [2], ["h"], False), 
("transformer.h.1.attn.attention.q_proj", ["q_proj"], [0, 1, 2], ["h"], True), ("transformer.h.1.attn.attention.q_proj", ["q_proj", "v_proj"], [0, 1, 2], ["h"], True), ("foo.bar.q_proj", ["q_proj"], None, [], True), ("foo.bar.1.baz", ["baz"], [1], ["foo"], False), # other corner cases. For ex, below is a case where layers_pattern # is one of the target nn.modules ("foo.bar.1.baz", ["baz"], [1], ["baz"], False), # here, layers_pattern is 'bar', but only keys that contain '.bar' are valid. ("bar.1.baz", ["baz"], [1], ["bar"], False), ("foo.bar.001.baz", ["baz"], [1], ["bar"], True), ("foo.bar.1.spam.2.baz", ["baz"], [1], ["bar"], True), ("foo.bar.2.spam.1.baz", ["baz"], [1], ["bar"], False), # some realistic examples: module using nn.Sequential # for the below test case, key should contain '.blocks' to be valid, because of how layers_pattern is matched ("blocks.1.weight", ["weight"], [1], ["blocks"], False), ("blocks.1.bias", ["weight"], [1], ["blocks"], False), ("mlp.blocks.1.weight", ["weight"], [1], ["blocks"], True), ("mlp.blocks.1.bias", ["weight"], [1], ["blocks"], False), ] MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES = [ # model_name, model_type, initial_target_modules, expected_target_modules # test for a causal Llama model ( "HuggingFaceH4/tiny-random-LlamaForCausalLM", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"], ), # test for a Llama model without the LM head ( "HuggingFaceH4/tiny-random-LlamaForCausalLM", "base", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"], ), # test for gpt2 with Conv1D layers ("hf-internal-testing/tiny-random-gpt2", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["c_attn", "c_proj", "c_fc"]), # test for T5 model ( "hf-internal-testing/tiny-random-t5", "seq2seq", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k", "q", "v", "o", "wi", "wo"], ), # test for GPTNeoX. 
output module list should exclude classification head - which is named as "embed_out" instead of the usual "lm_head" for GPTNeoX ( "hf-internal-testing/tiny-random-GPTNeoXForCausalLM", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"], ), ] # tests for a few args that should remain unchanged MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS = [ # initial_target_modules, expected_target_modules (["k_proj"], ["k_proj"]), # test with target_modules as None (None, None), # test with target_modules as a regex expression (".*(q_proj|v_proj)$", ".*(q_proj|v_proj)$"), ] BNB_QUANTIZATIONS = [("4bit",), ("8bit",)] BNB_TEST_CASES = [(x + y) for x in MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES for y in BNB_QUANTIZATIONS] class PeftCustomKwargsTester(unittest.TestCase): r""" Test if the PeftModel is instantiated with correct behaviour for custom kwargs. This includes: - test if regex matching works correctly - test if adapters handle custom kwargs the right way e.g. IA3 for `feedforward_modules` """ transformers_class_map = {"causal": AutoModelForCausalLM, "seq2seq": AutoModelForSeq2SeqLM, "base": AutoModel} @parameterized.expand(REGEX_TEST_CASES) def test_regex_matching_valid(self, key, target_modules, layers_to_transform, layers_pattern, expected_result): # We use a LoRA Config for testing, but the regex matching function is common for all BaseTuner subclasses. # example model_id for config initialization. 
key is matched only against the target_modules given, so this can be any model model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora" config = LoraConfig( base_model_name_or_path=model_id, target_modules=target_modules, layers_pattern=layers_pattern, layers_to_transform=layers_to_transform, ) actual_result = bool(check_target_module_exists(config, key)) assert actual_result == expected_result def test_module_matching_lora(self): # peft models that have a module matching method to inspect the matching modules to allow # users to easily debug their configuration. Here we only test a single case, not all possible combinations of # configs that could exist. This is okay as the method calls `check_target_module_exists` internally, which # has been extensively tested above. model_id = "hf-internal-testing/tiny-random-BloomForCausalLM" with hub_online_once(model_id): model = AutoModel.from_pretrained(model_id) # by default, this model matches query_key_value config = LoraConfig() peft_model = get_peft_model(model, config) output = inspect_matched_modules(peft_model) # inspects default adapter for peft_model matched = output["matched"] expected = [ "h.0.self_attention.query_key_value", "h.1.self_attention.query_key_value", "h.2.self_attention.query_key_value", "h.3.self_attention.query_key_value", "h.4.self_attention.query_key_value", ] assert matched == expected # module lists should match exactly # no overlap with matched modules unmatched = output["unmatched"] for key in expected: assert key not in unmatched def test_feedforward_matching_ia3(self): model_id = "hf-internal-testing/tiny-random-T5ForConditionalGeneration" with hub_online_once(model_id): model = AutoModelForSeq2SeqLM.from_pretrained(model_id) # simple example for just one t5 block for testing config_kwargs = { "target_modules": ".*encoder.*block.0.*(SelfAttention|EncDecAttention|DenseReluDense).(k|q|v|wo|wi)$", "feedforward_modules": ["wo", "wi"], } config = IA3Config(base_model_name_or_path=model_id, 
**config_kwargs) peft_model = get_peft_model(model, config) output = inspect_matched_modules(peft_model) # inspects default adapter for peft_model matched = output["matched"] expected = [ "encoder.block.0.layer.0.SelfAttention.q", "encoder.block.0.layer.0.SelfAttention.k", "encoder.block.0.layer.0.SelfAttention.v", "encoder.block.0.layer.1.DenseReluDense.wi", "encoder.block.0.layer.1.DenseReluDense.wo", ] expected_feedforward = [ "encoder.block.0.layer.1.DenseReluDense.wi", "encoder.block.0.layer.1.DenseReluDense.wo", ] assert matched == expected # not required since we do similar checks above, but just to be sure module_dict = dict(model.named_modules()) for key in matched: module = module_dict[key] if key in expected_feedforward: assert module.is_feedforward else: # other IA3 modules should not be marked as feedforward assert not module.is_feedforward @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES) def test_maybe_include_all_linear_layers_lora( self, model_id, model_type, initial_target_modules, expected_target_modules ): with hub_online_once(model_id): model = self.transformers_class_map[model_type].from_pretrained(model_id) config_cls = LoraConfig self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) @parameterized.expand(BNB_TEST_CASES) @require_non_cpu @require_bitsandbytes def test_maybe_include_all_linear_layers_lora_bnb( self, model_id, model_type, initial_target_modules, expected_target_modules, quantization ): if quantization == "4bit": config_kwargs = {"quantization_config": BitsAndBytesConfig(load_in_4bit=True)} elif quantization == "8bit": config_kwargs = {"quantization_config": BitsAndBytesConfig(load_in_8bit=True)} with hub_online_once(model_id): model = self.transformers_class_map[model_type].from_pretrained( model_id, device_map="auto", **config_kwargs ) config_cls = LoraConfig self._check_match_with_expected_target_modules( model_id, model, config_cls, 
initial_target_modules, expected_target_modules ) def _check_match_with_expected_target_modules( self, model_id, model, config_cls, initial_target_modules, expected_target_modules ): """ Helper function for the test for `_maybe_include_all_linear_layers` """ actual_config = config_cls(base_model_name_or_path=model_id, target_modules=initial_target_modules) expected_config = config_cls(base_model_name_or_path=model_id, target_modules=expected_target_modules) model_copy = deepcopy(model) actual_model = get_peft_model(model, peft_config=actual_config) expected_model = get_peft_model(model_copy, peft_config=expected_config) expected_model_module_dict = dict(expected_model.named_modules()) # compare the two models and assert that all layers are of the same type for name, actual_module in actual_model.named_modules(): expected_module = expected_model_module_dict[name] assert type(actual_module) is type(expected_module) def test_maybe_include_all_linear_layers_ia3_loha(self): model_id, initial_target_modules, expected_target_modules = ( "HuggingFaceH4/tiny-random-LlamaForCausalLM", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"], ) with hub_online_once(model_id): model_ia3 = AutoModelForCausalLM.from_pretrained(model_id) model_loha = deepcopy(model_ia3) config_classes = [IA3Config, LoHaConfig] models = [model_ia3, model_loha] for config_cls, model in zip(config_classes, models): self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS) def test_maybe_include_all_linear_layers_internals(self, initial_target_modules, expected_target_modules): model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig(base_model_name_or_path=model_id, target_modules=initial_target_modules) 
new_config = _maybe_include_all_linear_layers(config, model) if isinstance(expected_target_modules, list): # assert that expected and actual target_modules have the same items assert set(new_config.target_modules) == set(expected_target_modules) else: assert new_config.target_modules == expected_target_modules def test_maybe_include_all_linear_layers_diffusion(self): model_id = "hf-internal-testing/tiny-sd-pipe" with hub_online_once(model_id): model = StableDiffusionPipeline.from_pretrained(model_id) config = LoraConfig(base_model_name_or_path=model_id, target_modules="all-linear") # all linear layers should be converted num_linear = sum(isinstance(module, (nn.Linear, Conv1D)) for module in model.unet.modules()) model.unet = get_peft_model(model.unet, config) num_lora = sum(isinstance(module, LoraLayer) for module in model.unet.modules()) assert num_lora == num_linear def test_maybe_include_all_linear_does_not_target_classifier_head(self): # See issue 2027 # Ensure that if a SEQ_CLS model is being used with target_modules="all-linear", the classification head is not # targeted by the adapter layer. 
model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM" with hub_online_once(model_id): model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=10) # sanity check assert isinstance(model.score, nn.Linear) num_linear = sum(isinstance(module, (nn.Linear, Conv1D)) for module in model.modules()) config = LoraConfig(task_type="SEQ_CLS", target_modules="all-linear") model = get_peft_model(model, config) assert isinstance(model.base_model.score, ModulesToSaveWrapper) # the bug was that these were lora.Linear instances assert isinstance(model.base_model.score.original_module, nn.Linear) assert isinstance(model.base_model.score.modules_to_save["default"], nn.Linear) # ensure that all but one linear layer was targeted by LoRA num_lora = sum(isinstance(module, LoraLayer) for module in model.modules()) assert num_lora == num_linear - 1 @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES) def test_all_linear_nested_targets_correct_layers( self, model_id, model_type, initial_target_modules, expected_target_modules ): # See 2390 # Ensure that if adapter layers are already applied, we don't get nested adapter layers (e.g. LoRA targeting the # lora_A, lora_B layers) with hub_online_once(model_id): model = self.transformers_class_map[model_type].from_pretrained(model_id) config_cls = LoraConfig self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) # re-use the same model, i.e. 
the adapter is already applied self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) def test_add_second_adapter_with_all_linear_works(self): # See 2390 Similar test to test_all_linear_nested_targets_correct_layers above, but using add_adapter instead of # calling get_peft_model in an already adapted model model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) # important: don't reuse the first config, since config.target_modules will be overwritten, which would make the # test pass trivially. config0 = LoraConfig(target_modules=INCLUDE_LINEAR_LAYERS_SHORTHAND) config1 = LoraConfig(target_modules=INCLUDE_LINEAR_LAYERS_SHORTHAND) model = get_peft_model(model, config0) model.add_adapter(adapter_name="other", peft_config=config1) # both configs should result in the same target modules being chosen (remember that config.target_modules will # be replaced by the actual set of target_modules) assert config0.target_modules == config1.target_modules for layer in model.base_model.model.model.layers: projs = ( layer.self_attn.q_proj, layer.self_attn.v_proj, layer.self_attn.k_proj, layer.mlp.gate_proj, layer.mlp.up_proj, layer.mlp.down_proj, ) for proj in projs: # the targted layer itself, which in the base model was the nn.Linear layer, is now a LoraLayer assert isinstance(proj, LoraLayer) # all children of that layer are still normal nn.Linear layers assert isinstance(proj.base_layer, nn.Linear) assert isinstance(proj.lora_A["default"], nn.Linear) assert isinstance(proj.lora_B["default"], nn.Linear) assert isinstance(proj.lora_A["other"], nn.Linear) assert isinstance(proj.lora_B["other"], nn.Linear) class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 2, bias=bias) self.sm = 
nn.LogSoftmax(dim=-1) class TestTargetedModuleNames(unittest.TestCase): """Check that the attribute targeted_module_names is correctly set. This checks LoRA and IAยณ, but this should be sufficient, testing all other tuners is not necessary. """ def test_one_targeted_module_regex(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules="lin0")) assert model.targeted_module_names == ["lin0"] def test_two_targeted_module_regex(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules="lin.*")) assert model.targeted_module_names == ["lin0", "lin1"] def test_one_targeted_module_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=["lin0"])) assert model.targeted_module_names == ["lin0"] def test_two_targeted_module_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=["lin0", "lin1"])) assert model.targeted_module_names == ["lin0", "lin1"] def test_ia3_targeted_module_regex(self): model = MLP() model = get_peft_model(model, IA3Config(target_modules=".*lin.*", feedforward_modules=".*lin.*")) assert model.targeted_module_names == ["lin0", "lin1"] def test_ia3_targeted_module_list(self): model = MLP() model = get_peft_model(model, IA3Config(target_modules=["lin0", "lin1"], feedforward_modules=["lin0", "lin1"])) assert model.targeted_module_names == ["lin0", "lin1"] def test_realistic_example(self): model_id = "hf-internal-testing/tiny-random-BloomForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig(task_type="CAUSAL_LM") model = get_peft_model(model, config) expected = [ f"transformer.h.{i}.self_attention.query_key_value" for i in range(len(model.base_model.transformer.h)) ] assert model.targeted_module_names == expected class TestTargetedParameterNames(unittest.TestCase): """Check that the attribute targeted_parameter_names (via target_parameters) is correctly set. This is only implemented for LoRA. 
Regex matching is currently not implemented. """ def test_one_targeted_parameters_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_parameters=["lin0.weight"])) assert model.targeted_parameter_names == ["lin0.weight"] def test_two_targeted_parameters_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_parameters=["lin0.weight", "lin1.weight"])) assert model.targeted_parameter_names == ["lin0.weight", "lin1.weight"] def test_realistic_example(self): model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig(target_modules=[], task_type="CAUSAL_LM", target_parameters=["v_proj.weight"]) model = get_peft_model(model, config) expected = [ f"model.layers.{i}.self_attn.v_proj.weight" for i in range(len(model.base_model.model.model.layers)) ] assert model.targeted_parameter_names == expected class TestExcludedModuleNames(unittest.TestCase): """Check that the attribute exclude_module is correctly set. This checks LoRA and IAยณ, but this should be sufficient, testing all other tuners is not necessary. 
""" def test_two_excluded_module_regex(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=("lin.*"), exclude_modules="lin0")) assert model.targeted_module_names == ["lin1"] def test_two_excluded_module_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=["lin0", "lin1"], exclude_modules="lin0")) assert model.targeted_module_names == ["lin1"] def test_multiple_excluded_modules_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=["lin0", "lin1"], exclude_modules=["lin0"])) assert model.targeted_module_names == ["lin1"] def test_ia3_two_excluded_module_regex(self): model = MLP() model = get_peft_model( model, IA3Config(target_modules=".*lin.*", feedforward_modules=".*lin.*", exclude_modules="lin0") ) assert model.targeted_module_names == ["lin1"] def test_ia3_multiple_excluded_modules_list(self): model = MLP() model = get_peft_model( model, IA3Config(target_modules=["lin0", "lin1"], feedforward_modules=".*lin.*", exclude_modules=["lin1"]) ) assert model.targeted_module_names == ["lin0"] def test_all_modules_excluded(self): model = MLP() with pytest.raises(ValueError, match="All modules were excluded"): get_peft_model( model, LoraConfig( target_modules=["lin0", "lin1", "relu", "drop", "sm"], exclude_modules=["lin0", "lin1", "relu", "drop", "sm"], ), ) def test_no_modules_matched(self): model = MLP() with pytest.raises(ValueError, match="Target modules .* not found in the base model"): get_peft_model(model, LoraConfig(target_modules=["non_existent_module"])) def test_some_modules_excluded_some_unmatched(self): model = MLP() with pytest.raises(ValueError, match="No modules were targeted for adaptation"): get_peft_model(model, LoraConfig(target_modules=["lin0", "non_existent_module"], exclude_modules=["lin0"])) def test_exclude_modules_not_used(self): model = MLP() with pytest.warns(UserWarning, match="You have passed exclude_modules=.* but no modules were excluded"): 
get_peft_model(model, LoraConfig(target_modules=["lin1"], exclude_modules=["non_existent_module"])) def test_realistic_example(self): model_id = "hf-internal-testing/tiny-random-BloomForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig(task_type="CAUSAL_LM", exclude_modules="transformer.h.2.self_attention.query_key_value") model = get_peft_model(model, config) expected = [ f"transformer.h.{i}.self_attention.query_key_value" for i in range(len(model.base_model.transformer.h)) if i != 2 ] assert model.targeted_module_names == expected class TestModelAndLayerStatus: """Check the methods `get_layer_status` and `get_model_status`.` Note that we only test LoRA here but the same logic should work for other tuner types (if they support the corresponding features like merging). """ torch_device = infer_device() @pytest.fixture def small_model(self): class SmallModel(nn.Module): def __init__(self): super().__init__() self.lin0 = nn.Linear(10, 10) self.lin1 = nn.Linear(10, 10) config = LoraConfig(target_modules="lin0") return get_peft_model(SmallModel(), config) @pytest.fixture def large_model(self): class LargeModel(nn.Module): def __init__(self): super().__init__() self.lin0 = nn.Linear(10, 10) self.conv0 = nn.Conv2d(3, 10, 3) self.emb0 = nn.Embedding(10, 10) self.lin1 = nn.Linear(10, 10) self.conv1 = nn.Conv2d(3, 10, 3) self.emb1 = nn.Embedding(10, 10) config0 = LoraConfig(target_modules=["lin0", "conv1", "emb0"]) config1 = LoraConfig(target_modules=["lin0", "lin1"], r=16) model = get_peft_model(LargeModel(), config0) model.add_adapter("other", config1) return model ################ # layer status # ################ def test_layer_names_small(self, small_model): layer_status = small_model.get_layer_status() expected = ["model.lin0"] assert [status.name for status in layer_status] == expected def test_layer_names_large(self, large_model): layer_status = large_model.get_layer_status() result = 
sorted([status.name for status in layer_status]) expected = ["model.conv1", "model.emb0", "model.lin0", "model.lin1"] assert result == expected def test_module_type_small(self, small_model): layer_status = small_model.get_layer_status() assert [status.module_type for status in layer_status] == ["lora.Linear"] def test_module_type_large(self, large_model): layer_status = large_model.get_layer_status() result = sorted([status.module_type for status in layer_status]) expected = ["lora.Conv2d", "lora.Embedding", "lora.Linear", "lora.Linear"] assert result == expected def test_enabled_small(self, small_model): layer_status = small_model.get_layer_status() assert [status.enabled for status in layer_status] == [True] def test_enabled_large(self, large_model): layer_status = large_model.get_layer_status() result = [status.enabled for status in layer_status] expected = [True, True, True, True] assert result == expected def test_enabled_irregular(self, large_model): # this is an invalid state, but we should still test it # disable a single layer for module in large_model.modules(): if isinstance(module, BaseTunerLayer): module.enable_adapters(False) break layer_status = large_model.get_layer_status() result = [status.enabled for status in layer_status] expected = [False, True, True, True] assert result == expected def test_active_adapters_small(self, small_model): layer_status = small_model.get_layer_status() assert [status.active_adapters for status in layer_status] == [["default"]] def test_active_adapters_large(self, large_model): layer_status = large_model.get_layer_status() result = [status.active_adapters for status in layer_status] # note: as currently implemented, the active adapter can be an adapter that does not exist on this specific # layer, for instance, layer 3 (i.e. 
index 2) only has the "other" adapter but "default" is still shown as the # active adapter expected = [["default"], ["default"], ["default"], ["default"]] assert result == expected # switch to "other" large_model.set_adapter("other") layer_status = large_model.get_layer_status() result = [status.active_adapters for status in layer_status] expected = [["other"], ["other"], ["other"], ["other"]] def test_merge_adapters_small(self, small_model): layer_status = small_model.get_layer_status() assert [status.merged_adapters for status in layer_status] == [[]] assert [status.available_adapters for status in layer_status] == [["default"]] # now merge "default" small_model.merge_adapter(["default"]) layer_status = small_model.get_layer_status() assert [status.merged_adapters for status in layer_status] == [["default"]] assert [status.available_adapters for status in layer_status] == [["default"]] def test_merge_adapters_large(self, large_model): layer_status = large_model.get_layer_status() result = [status.merged_adapters for status in layer_status] assert result == [[], [], [], []] # now merge "default" large_model.merge_adapter(["default"]) layer_status = large_model.get_layer_status() result = [status.merged_adapters for status in layer_status] # default is on layer 0, 1, and 3 assert result == [["default"], ["default"], [], ["default"]] # now merge "other" large_model.unmerge_adapter() large_model.merge_adapter(["other"]) layer_status = large_model.get_layer_status() result = [status.merged_adapters for status in layer_status] # other is on layer 0 and 2 assert result == [["other"], [], ["other"], []] # now merge both large_model.merge_adapter(["default", "other"]) layer_status = large_model.get_layer_status() result = [status.merged_adapters for status in layer_status] # default is on layer 0, 1, and 3, other is on layer 0 and 2 assert result == [["other", "default"], ["default"], ["other"], ["default"]] def test_requires_grad_small(self, small_model): layer_status = 
small_model.get_layer_status() assert [status.requires_grad for status in layer_status] == [{"default": True}] def test_requires_grad_large(self, large_model): layer_status = large_model.get_layer_status() result = [status.requires_grad for status in layer_status] # default is on layer 0, 1, and 3, other is on layer 0 and 2 expected = [{"default": True, "other": False}, {"default": True}, {"other": False}, {"default": True}] assert result == expected # now activate "other" large_model.set_adapter("other") layer_status = large_model.get_layer_status() result = [status.requires_grad for status in layer_status] expected = [{"default": False, "other": True}, {"default": False}, {"other": True}, {"default": False}] assert result == expected def test_requires_grad_irregular(self, large_model): # inject an embedding layer with requires_grad=False # this is an invalid state, but we should still test it lora_embedding_A = nn.Parameter(torch.zeros(10, 10)) lora_embedding_B = nn.Parameter(torch.zeros(10, 10)) lora_embedding_A.requires_grad = False lora_embedding_B.requires_grad = False large_model.base_model.model.lin0.lora_embedding_A["default"] = lora_embedding_A large_model.base_model.model.lin0.lora_embedding_B["default"] = lora_embedding_B layer_status = large_model.get_layer_status() result = [status.requires_grad for status in layer_status] expected = [{"default": "irregular", "other": False}, {"default": True}, {"other": False}, {"default": True}] assert result == expected def test_available_adapters_small(self, small_model): layer_status = small_model.get_layer_status() result = [status.available_adapters for status in layer_status] expected = [["default"]] assert result == expected def test_available_adapters_large(self, large_model): layer_status = large_model.get_layer_status() result = [status.available_adapters for status in layer_status] expected = [["default", "other"], ["default"], ["other"], ["default"]] assert result == expected def 
test_devices_all_cpu_small(self, small_model): layer_status = small_model.get_layer_status() result = [status.devices for status in layer_status] expected = [{"default": ["cpu"]}] assert result == expected def test_devices_all_cpu_large(self, large_model): layer_status = large_model.get_layer_status() result = [status.devices for status in layer_status] expected = [ {"default": ["cpu"], "other": ["cpu"]}, {"default": ["cpu"]}, {"other": ["cpu"]}, {"default": ["cpu"]}, ] assert result == expected @require_non_cpu def test_devices_all_gpu_large(self, large_model): large_model.to(self.torch_device) layer_status = large_model.get_layer_status() result = [status.devices for status in layer_status] expected = [ {"default": [self.torch_device], "other": [self.torch_device]}, {"default": [self.torch_device]}, {"other": [self.torch_device]}, {"default": [self.torch_device]}, ] assert result == expected @require_non_cpu def test_devices_cpu_and_gpu_large(self, large_model): # move the embedding layer to GPU large_model.model.lin0.lora_A["default"] = large_model.model.lin0.lora_A["default"].to(self.torch_device) layer_status = large_model.get_layer_status() result = [status.devices for status in layer_status] expected = [ {"default": ["cpu", self.torch_device], "other": ["cpu"]}, {"default": ["cpu"]}, {"other": ["cpu"]}, {"default": ["cpu"]}, ] assert result == expected def test_target_parameters(self, large_model): # don't check each attribute, just the relevant ones # first remove the normal LoRA layers large_model = large_model.merge_and_unload() config = LoraConfig(target_parameters=["lin0.weight", "lin1.weight"]) large_model = get_peft_model(large_model, config) layer_status = large_model.get_layer_status() assert [status.name for status in layer_status] == ["model.lin0", "model.lin1"] assert [status.module_type for status in layer_status] == ["lora.ParamWrapper"] * 2 def test_target_parameters_and_target_modules(self, large_model): # don't check each attribute, just the 
relevant ones # first remove the normal LoRA layers large_model = large_model.merge_and_unload() config = LoraConfig(target_parameters=["lin0.weight"], target_modules=["lin1"]) large_model = get_peft_model(large_model, config) layer_status = large_model.get_layer_status() assert [status.name for status in layer_status] == ["model.lin0", "model.lin1"] assert [status.module_type for status in layer_status] == ["lora.ParamWrapper", "lora.Linear"] ################ # model status # ################ def test_base_model_type_small(self, small_model): model_status = small_model.get_model_status() assert model_status.base_model_type == "SmallModel" def test_base_model_type_large(self, large_model): model_status = large_model.get_model_status() assert model_status.base_model_type == "LargeModel" def test_base_model_type_transformers_automodel(self): # ensure that this also works with transformers AutoModels model_id = "google/flan-t5-small" with hub_online_once(model_id): model = AutoModel.from_pretrained(model_id) model = get_peft_model(model, LoraConfig()) model_status = model.get_model_status() assert model_status.base_model_type == "T5Model" def test_adapter_model_type_small(self, small_model): model_status = small_model.get_model_status() assert model_status.adapter_model_type == "LoraModel" def test_adapter_model_type_large(self, large_model): model_status = large_model.get_model_status() assert model_status.adapter_model_type == "LoraModel" def test_peft_types_small(self, small_model): model_status = small_model.get_model_status() assert model_status.peft_types == {"default": "LORA"} def test_peft_types_large(self, large_model): model_status = large_model.get_model_status() assert model_status.peft_types == {"default": "LORA", "other": "LORA"} def test_nb_params_small(self, small_model): model_status = small_model.get_model_status() assert model_status.trainable_params == 160 assert model_status.total_params == 380 def test_nb_params_large(self, large_model): 
model_status = large_model.get_model_status() assert model_status.trainable_params == 616 assert model_status.total_params == 2236 def test_num_adapter_layers_small(self, small_model): model_status = small_model.get_model_status() assert model_status.num_adapter_layers == 1 def test_num_adapter_layers_large(self, large_model): model_status = large_model.get_model_status() assert model_status.num_adapter_layers == 4 def test_model_enabled_small(self, small_model): model_status = small_model.get_model_status() assert model_status.enabled is True def test_model_enabled_large(self, large_model): model_status = large_model.get_model_status() assert model_status.enabled is True def test_model_disabled_small(self, small_model): small_model.disable_adapter_layers() model_status = small_model.get_model_status() assert model_status.enabled is False def test_model_disabled_large(self, large_model): large_model.disable_adapter_layers() model_status = large_model.get_model_status() assert model_status.enabled is False def test_model_enabled_irregular(self, large_model): # this is an invalid state, but we should still test it # disable a single layer for module in large_model.modules(): if isinstance(module, BaseTunerLayer): module.enable_adapters(False) break model_status = large_model.get_model_status() assert model_status.enabled == "irregular" def test_model_active_adapters_small(self, small_model): model_status = small_model.get_model_status() assert model_status.active_adapters == ["default"] def test_model_active_adapters_large(self, large_model): model_status = large_model.get_model_status() assert model_status.active_adapters == ["default"] large_model.set_adapter("other") model_status = large_model.get_model_status() assert model_status.active_adapters == ["other"] def test_model_active_adapters_irregular(self, large_model): # this is an invalid state, but we should still test it # disable a single layer for module in large_model.modules(): if isinstance(module, 
BaseTunerLayer): # switch a single layer's active adapter from default to other if module.active_adapters == ["default"]: module._active_adapter = "other" assert module.active_adapters == ["other"] break model_status = large_model.get_model_status() assert model_status.active_adapters == "irregular" def test_model_merged_adapters_small(self, small_model): model_status = small_model.get_model_status() assert model_status.merged_adapters == [] small_model.merge_adapter() model_status = small_model.get_model_status() assert model_status.merged_adapters == ["default"] small_model.unmerge_adapter() model_status = small_model.get_model_status() assert model_status.merged_adapters == [] def test_model_merged_adapters_large(self, large_model): model_status = large_model.get_model_status() assert model_status.merged_adapters == [] large_model.merge_adapter(["default"]) model_status = large_model.get_model_status() assert model_status.merged_adapters == ["default"] large_model.unmerge_adapter() large_model.merge_adapter(["other"]) model_status = large_model.get_model_status() assert model_status.merged_adapters == ["other"] large_model.unmerge_adapter() large_model.merge_adapter(["default", "other"]) model_status = large_model.get_model_status() assert model_status.merged_adapters == ["default", "other"] def test_model_merged_adapters_irregular(self, large_model): # this is an invalid state, but we should still test it # by merging only lin0 of "default", we end up in a irregular state, because not all "default" layers are merged large_model.base_model.lin0.merge(["default"]) model_status = large_model.get_model_status() assert model_status.merged_adapters == "irregular" def test_model_requires_grad_model_small(self, small_model): model_status = small_model.get_model_status() assert model_status.requires_grad == {"default": True} def test_model_requires_grad_model_large(self, large_model): model_status = large_model.get_model_status() assert model_status.requires_grad == 
{"default": True, "other": False} large_model.set_adapter("other") model_status = large_model.get_model_status() assert model_status.requires_grad == {"default": False, "other": True} def test_model_requires_grad_model_irregular(self, large_model): # inject an embedding layer with requires_grad=False # this is an invalid state, but we should still test it lora_embedding_A = nn.Parameter(torch.zeros(10, 10)) lora_embedding_B = nn.Parameter(torch.zeros(10, 10)) lora_embedding_A.requires_grad = False lora_embedding_B.requires_grad = False large_model.base_model.model.lin0.lora_embedding_A["default"] = lora_embedding_A large_model.base_model.model.lin0.lora_embedding_B["default"] = lora_embedding_B model_status = large_model.get_model_status() assert model_status.requires_grad == {"default": "irregular", "other": False} def test_model_available_adapters_small(self, small_model): model_status = small_model.get_model_status() assert model_status.available_adapters == ["default"] def test_model_available_adapters_large(self, large_model): model_status = large_model.get_model_status() assert model_status.available_adapters == ["default", "other"] def test_model_devices_all_cpu_small(self, small_model): model_status = small_model.get_model_status() assert model_status.devices == {"default": ["cpu"]} def test_model_devices_all_cpu_large(self, large_model): model_status = large_model.get_model_status() assert model_status.devices == {"default": ["cpu"], "other": ["cpu"]} @require_non_cpu def test_model_devices_all_gpu_large(self, large_model): large_model.to(self.torch_device) model_status = large_model.get_model_status() assert model_status.devices == {"default": [self.torch_device], "other": [self.torch_device]} @require_non_cpu def test_model_devices_cpu_and_gpu_large(self, large_model): # move the embedding layer to GPU large_model.model.lin0.lora_A["default"] = large_model.model.lin0.lora_A["default"].to(self.torch_device) model_status = large_model.get_model_status() 
assert model_status.devices == {"default": ["cpu", self.torch_device], "other": ["cpu"]} def test_model_target_parameters(self, large_model): # don't check each attribute, just the relevant ones # first remove the normal LoRA layers large_model = large_model.merge_and_unload() config = LoraConfig(target_parameters=["lin0.weight", "lin1.weight"]) large_model = get_peft_model(large_model, config) model_status = large_model.get_model_status() model_status = large_model.get_model_status() assert model_status.adapter_model_type == "LoraModel" assert model_status.peft_types == {"default": "LORA", "other": "LORA"} assert model_status.num_adapter_layers == 2 assert model_status.trainable_params == 2 * (8 * 10 + 10 * 8) def test_model_target_parameters_and_target_modules(self, large_model): # don't check each attribute, just the relevant ones # first remove the normal LoRA layers large_model = large_model.merge_and_unload() config = LoraConfig(target_parameters=["lin0.weight"], target_modules=["lin1"]) large_model = get_peft_model(large_model, config) model_status = large_model.get_model_status() assert model_status.adapter_model_type == "LoraModel" assert model_status.peft_types == {"default": "LORA", "other": "LORA"} assert model_status.num_adapter_layers == 2 assert model_status.trainable_params == 2 * (8 * 10 + 10 * 8) def test_loha_model(self): # ensure that this also works with non-LoRA, it's not necessary to test all tuners class SmallModel(nn.Module): def __init__(self): super().__init__() self.lin0 = nn.Linear(10, 10) self.lin1 = nn.Linear(10, 10) base_model = SmallModel() config = LoHaConfig(target_modules=["lin0", "lin1"], init_weights=False) model = get_peft_model(base_model, config) model_status = model.get_model_status() layer_status = model.get_layer_status() assert model_status.base_model_type == "SmallModel" assert model_status.adapter_model_type == "LoHaModel" assert model_status.peft_types == {"default": "LOHA"} assert model_status.trainable_params == 640 
assert model_status.total_params == 860 assert model_status.num_adapter_layers == 2 assert model_status.enabled is True assert model_status.active_adapters == ["default"] assert model_status.merged_adapters == [] assert model_status.requires_grad == {"default": True} assert model_status.available_adapters == ["default"] assert model_status.devices == {"default": ["cpu"]} layer_status0 = layer_status[0] assert len(layer_status) == 2 assert layer_status0.name == "model.lin0" assert layer_status0.module_type == "loha.Linear" assert layer_status0.enabled is True assert layer_status0.active_adapters == ["default"] assert layer_status0.merged_adapters == [] assert layer_status0.requires_grad == {"default": True} assert layer_status0.available_adapters == ["default"] assert layer_status0.devices == {"default": ["cpu"]} @require_non_cpu def test_vera_model(self): # let's also test VeRA because it uses BufferDict class SmallModel(nn.Module): def __init__(self): super().__init__() self.lin0 = nn.Linear(10, 10) self.lin1 = nn.Linear(10, 10) base_model = SmallModel() config = VeraConfig(target_modules=["lin0", "lin1"], init_weights=False) model = get_peft_model(base_model, config) # move the buffer dict to GPU model.lin0.vera_A["default"] = model.lin0.vera_A["default"].to(self.torch_device) model_status = model.get_model_status() layer_status = model.get_layer_status() assert model_status.base_model_type == "SmallModel" assert model_status.adapter_model_type == "VeraModel" assert model_status.peft_types == {"default": "VERA"} assert model_status.trainable_params == 532 assert model_status.total_params == 752 assert model_status.num_adapter_layers == 2 assert model_status.enabled is True assert model_status.active_adapters == ["default"] assert model_status.merged_adapters == [] assert model_status.requires_grad == {"default": True} assert model_status.available_adapters == ["default"] assert model_status.devices == {"default": ["cpu", self.torch_device]} layer_status0 = 
layer_status[0] assert len(layer_status) == 2 assert layer_status0.name == "model.lin0" assert layer_status0.module_type == "vera.Linear" assert layer_status0.enabled is True assert layer_status0.active_adapters == ["default"] assert layer_status0.merged_adapters == [] assert layer_status0.requires_grad == {"default": True} assert layer_status0.available_adapters == ["default"] assert layer_status0.devices == {"default": ["cpu", self.torch_device]} ################### # non-PEFT models # ################### def test_transformers_model(self): model_id = "peft-internal-testing/gpt2-lora-random" # note that loading through AutoModelForCausalLM.from_pretrained does not enable training mode, hence # requires_grad=False with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) model_status = get_model_status(model) layer_status = get_layer_status(model) assert model_status.base_model_type == "GPT2LMHeadModel" assert model_status.adapter_model_type == "None" assert model_status.peft_types == {} assert model_status.trainable_params == 0 assert model_status.total_params == 124734720 assert model_status.num_adapter_layers == 12 assert model_status.enabled is True assert model_status.active_adapters == ["default"] assert model_status.merged_adapters == [] assert model_status.requires_grad == {"default": False} assert model_status.available_adapters == ["default"] assert model_status.devices == {"default": ["cpu"]} layer_status0 = layer_status[0] assert len(layer_status) == 12 assert layer_status0.name == "transformer.h.0.attn.c_attn" assert layer_status0.module_type == "lora.Linear" assert layer_status0.enabled is True assert layer_status0.active_adapters == ["default"] assert layer_status0.merged_adapters == [] assert layer_status0.requires_grad == {"default": False} assert layer_status0.available_adapters == ["default"] assert layer_status0.devices == {"default": ["cpu"]} def test_model_with_injected_layers(self, large_model): model = 
large_model.base_model.model model_status = get_model_status(model) layer_status = get_layer_status(model) assert model_status.base_model_type == "other" assert model_status.adapter_model_type == "None" assert model_status.peft_types == {} assert model_status.trainable_params == 616 assert model_status.total_params == 2236 assert model_status.num_adapter_layers == 4 assert model_status.enabled is True assert model_status.active_adapters == ["default"] assert model_status.merged_adapters == [] assert model_status.requires_grad == {"default": True, "other": False} assert model_status.available_adapters == ["default", "other"] assert model_status.devices == {"default": ["cpu"], "other": ["cpu"]} layer_status1 = layer_status[1] assert len(layer_status) == 4 assert layer_status1.name == "emb0" assert layer_status1.module_type == "lora.Embedding" assert layer_status1.enabled is True assert layer_status1.active_adapters == ["default"] assert layer_status1.merged_adapters == [] assert layer_status1.requires_grad == {"default": True} assert layer_status1.available_adapters == ["default"] assert layer_status1.devices == {"default": ["cpu"]} ############### # error cases # ############### def test_vanilla_model_raises(self): model = nn.Linear(10, 10) # note: full error message is longer with pytest.raises(ValueError, match="No adapter layers found in the model"): get_layer_status(model) with pytest.raises(ValueError, match="No adapter layers found in the model"): get_model_status(model) def test_transformer_model_without_adapter_raises(self): model_id = "gpt2" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) # note: full error message is longer with pytest.raises(ValueError, match="No adapter layers found in the model"): get_layer_status(model) with pytest.raises(ValueError, match="No adapter layers found in the model"): get_model_status(model) def test_prefix_tuning(self): model_id = 
"hf-internal-testing/tiny-random-BartForConditionalGeneration" with hub_online_once(model_id): model = AutoModelForSeq2SeqLM.from_pretrained(model_id) config = PromptTuningConfig(task_type="SEQ_2_SEQ_LM", num_virtual_tokens=10) model = get_peft_model(model, config) # note: full error message is longer with pytest.raises(TypeError, match=re.escape("get_layer_status() got an invalid PeftModel instance")): model.get_layer_status() with pytest.raises(TypeError, match=re.escape("get_model_status() got an invalid PeftModel instance")): model.get_model_status() def test_adaption_prompt(self): model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4) model = get_peft_model(model, config) # note: full error message is longer with pytest.raises(TypeError, match=re.escape("get_layer_status() got an invalid PeftModel instance")): model.get_layer_status() with pytest.raises(TypeError, match=re.escape("get_model_status() got an invalid PeftModel instance")): model.get_model_status() def test_mixed_model_raises(self): class SimpleNet(nn.Module): def __init__(self, bias=True): super().__init__() # note: out_features must be > rank or else OFT will be an identity transform self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.lin1 = nn.Linear(20, 16, bias=bias) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.lin1(X) return X base_model = SimpleNet() config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False) config1 = LoHaConfig(target_modules=["lin0", "lin1"], init_weights=False) model = get_peft_model(base_model, config0, adapter_name="adapter0", mixed="mixed") model.add_adapter("adapter1", config1) # note: full error message is longer with pytest.raises(TypeError, match="get_layer_status is not supported for PeftMixedModel"): model.get_layer_status() with 
pytest.raises(TypeError, match="get_model_status is not supported for PeftMixedModel"): model.get_model_status() # Tests for BaseTuner class MockModelConfig: config = {"mock_key": "mock_value"} def to_dict(self): return self.config class ModelWithConfig(nn.Module): def __init__(self): self.config = MockModelConfig() class ModelWithDictConfig(nn.Module): def __init__(self): self.config = MockModelConfig.config class ModelWithNoConfig(nn.Module): pass class TestBaseTunerGetModelConfig(unittest.TestCase): def test_get_model_config_use_to_dict(self): config = BaseTuner.get_model_config(ModelWithConfig()) assert config == MockModelConfig.config def test_get_model_config_as_dict(self): config = BaseTuner.get_model_config(ModelWithDictConfig()) assert config == MockModelConfig.config def test_get_model_config_with_no_config(self): config = BaseTuner.get_model_config(ModelWithNoConfig()) assert config == DUMMY_MODEL_CONFIG class TestBaseTunerWarnForTiedEmbeddings: model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM" warn_end_inject = "huggingface/peft/issues/2018." 
warn_end_merge = ( "# Now use the original model but in untied format\n" "model = AutoModelForCausalLM.from_pretrained(untied_model_dir)\n```\n" ) def _get_peft_model(self, tie_word_embeddings, target_module): with hub_online_once(self.model_id): base_model = AutoModelForCausalLM.from_pretrained(self.model_id, tie_word_embeddings=tie_word_embeddings) model = get_peft_model( base_model, LoraConfig(target_modules=[target_module]), ) return model def _is_warn_triggered(self, warning_list, endswith): return any(str(warning.message).endswith(endswith) for warning in warning_list) def test_warn_for_tied_embeddings_inject(self, recwarn): self._get_peft_model(tie_word_embeddings=True, target_module="lm_head") assert self._is_warn_triggered(recwarn.list, self.warn_end_inject) def test_warn_for_tied_embeddings_merge(self, recwarn): model = self._get_peft_model(tie_word_embeddings=True, target_module="lm_head") model.merge_and_unload() assert self._is_warn_triggered(recwarn.list, self.warn_end_merge) def test_no_warn_for_untied_embeddings_inject(self, recwarn): self._get_peft_model(tie_word_embeddings=False, target_module="lm_head") assert not self._is_warn_triggered(recwarn.list, self.warn_end_inject) def test_no_warn_for_untied_embeddings_merge(self, recwarn): model_not_tied = self._get_peft_model(tie_word_embeddings=False, target_module="lm_head") model_not_tied.merge_and_unload() assert not self._is_warn_triggered(recwarn.list, self.warn_end_merge) def test_no_warn_for_no_target_module_inject(self, recwarn): self._get_peft_model(tie_word_embeddings=True, target_module="q_proj") assert not self._is_warn_triggered(recwarn.list, self.warn_end_inject) def test_no_warn_for_no_target_module_merge(self, recwarn): model_no_target_module = self._get_peft_model(tie_word_embeddings=True, target_module="q_proj") model_no_target_module.merge_and_unload() assert not self._is_warn_triggered(recwarn.list, self.warn_end_merge) class TestFindMinimalTargetModules: @pytest.mark.parametrize( 
"target_modules, other_module_names, expected", [ (["bar"], [], {"bar"}), (["foo"], ["bar"], {"foo"}), (["1.foo", "2.foo"], ["3.foo", "4.foo"], {"1.foo", "2.foo"}), # Could also return "bar.baz" but we want the shorter one (["bar.baz"], ["foo.bar"], {"baz"}), (["1.foo", "2.foo", "bar.baz"], ["3.foo", "bar.bla"], {"1.foo", "2.foo", "baz"}), # Case with longer suffix chains and nested suffixes (["a.b.c", "d.e.f", "g.h.i"], ["j.k.l", "m.n.o"], {"c", "f", "i"}), (["a.b.c", "d.e.f", "g.h.i"], ["a.b.x", "d.x.f", "x.h.i"], {"c", "e.f", "g.h.i"}), # Case with multiple items that can be covered by a single suffix (["foo.bar.baz", "qux.bar.baz"], ["baz.bar.foo"], {"baz"}), # Realistic examples # Only match k_proj ( ["model.decoder.layers.{i}.self_attn.k_proj" for i in range(12)], ( ["model.decoder.layers.{i}.self_attn" for i in range(12)] + ["model.decoder.layers.{i}.self_attn.v_proj" for i in range(12)] + ["model.decoder.layers.{i}.self_attn.q_proj" for i in range(12)] ), {"k_proj"}, ), # Match all k_proj except the one in layer 5 => no common suffix ( ["model.decoder.layers.{i}.self_attn.k_proj" for i in range(12) if i != 5], ( ["model.decoder.layers.5.self_attn.k_proj"] + ["model.decoder.layers.{i}.self_attn" for i in range(12)] + ["model.decoder.layers.{i}.self_attn.v_proj" for i in range(12)] + ["model.decoder.layers.{i}.self_attn.q_proj" for i in range(12)] ), {"{i}.self_attn.k_proj" for i in range(12) if i != 5}, ), ], ) def test_find_minimal_target_modules(self, target_modules, other_module_names, expected): # check all possible combinations of list and set result = find_minimal_target_modules(target_modules, other_module_names) assert result == expected result = find_minimal_target_modules(set(target_modules), other_module_names) assert result == expected result = find_minimal_target_modules(target_modules, set(other_module_names)) assert result == expected result = find_minimal_target_modules(set(target_modules), set(other_module_names)) assert result == expected 
    def test_find_minimal_target_modules_empty_raises(self):
        # an empty list/set of target modules is invalid input
        with pytest.raises(ValueError, match="target_modules should be a list or set of strings"):
            find_minimal_target_modules([], ["foo"])

        with pytest.raises(ValueError, match="target_modules should be a list or set of strings"):
            find_minimal_target_modules(set(), ["foo"])

    def test_find_minimal_target_modules_contains_empty_string_raises(self):
        # an empty string inside target_modules would match everything and is rejected
        target_modules = ["", "foo", "bar.baz"]
        other_module_names = ["bar"]
        with pytest.raises(ValueError, match="target_modules should not contain an empty string"):
            find_minimal_target_modules(target_modules, other_module_names)

    def test_find_minimal_target_modules_string_raises(self):
        # a bare string (as opposed to a list/set of strings) is rejected
        target_modules = "foo"
        other_module_names = ["bar"]
        with pytest.raises(ValueError, match="target_modules should be a list or set of strings"):
            find_minimal_target_modules(target_modules, other_module_names)

    @pytest.mark.parametrize(
        "target_modules, other_module_names",
        [
            (["foo"], ["foo"]),
            (["foo.bar"], ["foo.bar"]),
            (["foo.bar", "spam", "eggs"], ["foo.bar"]),
            (["foo.bar", "spam"], ["foo.bar", "eggs"]),
            (["foo.bar"], ["foo.bar", "spam", "eggs"]),
        ],
    )
    def test_find_minimal_target_modules_not_disjoint_raises(self, target_modules, other_module_names):
        # target_modules and other_module_names overlapping indicates an internal inconsistency upstream
        msg = (
            "target_modules and other_module_names contain common elements, this should not happen, please "
            "open a GitHub issue at https://github.com/huggingface/peft/issues with the code to reproduce this issue"
        )
        with pytest.raises(ValueError, match=msg):
            find_minimal_target_modules(target_modules, other_module_names)

    def test_get_peft_model_applies_find_target_modules(self):
        # Check that when calling get_peft_model, the target_module optimization is indeed applied if the length of
        # target_modules is big enough. The resulting model itself should be unaffected.
        torch.manual_seed(0)
        model_id = "facebook/opt-125m"  # must be big enough for optimization to trigger
        with hub_online_once(model_id):
            model = AutoModelForCausalLM.from_pretrained(model_id)

        # base case: specify target_modules in a minimal fashion
        config = LoraConfig(init_lora_weights=False, target_modules=["q_proj", "v_proj"])
        model = get_peft_model(model, config)

        # this list contains all targeted modules listed separately
        big_target_modules = [name for name, module in model.named_modules() if isinstance(module, LoraLayer)]
        # sanity check
        assert len(big_target_modules) > MIN_TARGET_MODULES_FOR_OPTIMIZATION

        # make a "checksum" of the model for comparison
        model_check_sum_before = sum(p.sum() for p in model.parameters())

        # strip prefix so that the names can be used as new target_modules
        prefix_to_strip = "base_model.model.model."
        big_target_modules = [name[len(prefix_to_strip) :] for name in big_target_modules]

        del model

        torch.manual_seed(0)
        with hub_online_once(model_id):
            model = AutoModelForCausalLM.from_pretrained(model_id)
        # pass the big target_modules to config
        config = LoraConfig(init_lora_weights=False, target_modules=big_target_modules)
        model = get_peft_model(model, config)

        # check that target modules have been condensed
        assert model.peft_config["default"].target_modules == {"q_proj", "v_proj"}

        # check that the resulting model is still the same
        model_check_after = sum(p.sum() for p in model.parameters())
        assert model_check_sum_before == model_check_after

    def test_suffix_is_substring_of_other_suffix(self):
        # This test is based on a real world bug found in diffusers. The issue was that we needed the suffix
        # 'time_emb_proj' in the minimal target modules. However, if there already was the suffix 'proj' in the
        # required_suffixes, 'time_emb_proj' would not be added because the test was `endswith(suffix)` and
        # 'time_emb_proj' ends with 'proj'. The correct logic is to test if `endswith("." + suffix")`. The module names
        # chosen here are only a subset of the hundreds of actual module names but this subset is sufficient to
        # replicate the bug.
        target_modules = [
            "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
            "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj",
            "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj",
            "mid_block.attentions.0.proj_out",
            "up_blocks.0.attentions.0.proj_out",
            "down_blocks.1.attentions.0.proj_out",
            "up_blocks.0.resnets.0.time_emb_proj",
            "down_blocks.0.resnets.0.time_emb_proj",
            "mid_block.resnets.0.time_emb_proj",
        ]
        other_module_names = [
            "conv_in",
            "time_proj",
            "time_embedding",
            "time_embedding.linear_1",
            "add_time_proj",
            "add_embedding",
            "add_embedding.linear_1",
            "add_embedding.linear_2",
            "down_blocks",
            "down_blocks.0",
            "down_blocks.0.resnets",
            "down_blocks.0.resnets.0",
            "up_blocks",
            "up_blocks.0",
            "up_blocks.0.attentions",
            "up_blocks.0.attentions.0",
            "up_blocks.0.attentions.0.norm",
            "up_blocks.0.attentions.0.transformer_blocks",
            "up_blocks.0.attentions.0.transformer_blocks.0",
            "up_blocks.0.attentions.0.transformer_blocks.0.norm1",
            "up_blocks.0.attentions.0.transformer_blocks.0.attn1",
        ]
        expected = {"time_emb_proj", "proj", "proj_out"}
        result = find_minimal_target_modules(target_modules, other_module_names)
        assert result == expected

    def test_get_peft_modules_module_name_is_suffix_of_another_module(self):
        # Solves the following bug:
        # https://github.com/huggingface/diffusers/pull/9622#issuecomment-2404789721
        # The cause for the bug is as follows: When we have, say, a module called "bar.0.query" that we want to target
        # and another module called "foo_bar.0.query" that we don't want to target, there was potential for an error.
        # This is not caused by _find_minimal_target_modules directly, but rather the bug was inside of
        # BaseTuner.inject_adapter and how the names_no_target were chosen. Those used to be chosen based on suffix. In
        # our example, however, "bar.0.query" is a suffix of "foo_bar.0.query", therefore "foo_bar.0.query" was *not*
        # added to names_no_target when it should have. As a consequence, during the optimization, it looks like
        # "query" is safe to use as target_modules because we don't see that it wrongly matches "foo_bar.0.query".

        # ensure that we have sufficiently many modules to trigger the optimization
        n_layers = MIN_TARGET_MODULES_FOR_OPTIMIZATION + 1

        class InnerModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.query = nn.Linear(10, 10)

        class OuterModule(nn.Module):
            def __init__(self):
                super().__init__()
                # note that "transformer_blocks" is a suffix of "single_transformer_blocks"
                self.transformer_blocks = nn.ModuleList([InnerModule() for _ in range(n_layers)])
                self.single_transformer_blocks = nn.ModuleList([InnerModule() for _ in range(n_layers)])

        # we want to match all "transformer_blocks" layers but not "single_transformer_blocks"
        target_modules = [f"transformer_blocks.{i}.query" for i in range(n_layers)]
        model = get_peft_model(OuterModule(), LoraConfig(target_modules=target_modules))

        # sanity check: we should have n_layers PEFT layers in model.transformer_blocks
        transformer_blocks = model.base_model.model.transformer_blocks
        assert sum(isinstance(module, BaseTunerLayer) for module in transformer_blocks.modules()) == n_layers

        # we should not have any PEFT layers in model.single_transformer_blocks
        single_transformer_blocks = model.base_model.model.single_transformer_blocks
        assert not any(isinstance(module, BaseTunerLayer) for module in single_transformer_blocks.modules())

        # target modules should *not* be simplified to "query" as that would match "single_transformers_blocks" too
        assert model.peft_config["default"].target_modules != {"query"}

    def test_find_minimal_target_modules_does_not_error_with_ia3(self, tmp_path):
        # See #2429
        # There is an issue with the compression of the target_modules attribute when using IA³. There, we additionally
        # have the feedforward_modules attribute, which must be subset of target_modules. When target_modules is
        # shrunk, the subset check will fail. This test ensures that this doesn't happen.
        n_layers = MIN_TARGET_MODULES_FOR_OPTIMIZATION + 1

        class InnerModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.query = nn.Linear(10, 10)

        class OuterModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.blocks = nn.ModuleList([InnerModule() for _ in range(n_layers)])

        target_modules = [f"blocks.{i}.query" for i in range(n_layers)]
        feedforward_modules = [f"blocks.{i}.query" for i in range(n_layers)]
        # the subset check happens here
        config = IA3Config(target_modules=target_modules, feedforward_modules=feedforward_modules)
        # the optimization step happens here, after the subset check, so at first we're fine, but we will run into an
        # issue after a save/load roundtrip
        model = get_peft_model(OuterModule(), config)
        model.save_pretrained(tmp_path)
        del model

        # does not raise
        PeftModel.from_pretrained(OuterModule(), tmp_path)


class TestRankAndAlphaPattern:
    """Tests that LoraConfig.rank_pattern and alpha_pattern select the intended layers, including when the same
    attribute name ("foo") appears at several nesting depths and when patterns are anchored with a caret."""

    @pytest.fixture
    def model(self):
        # we always target the foo layers, the *bar* layers are used as a control group to ensure that they are not
        # accidentally targeted
        class Inner(nn.Module):
            def __init__(self):
                super().__init__()
                self.foo = nn.Linear(1, 1)
                self.barfoo = nn.Linear(1, 1)

        class Middle(nn.Module):
            def __init__(self):
                super().__init__()
                self.foo = nn.Linear(1, 1)
                self.foobar = nn.Linear(1, 1)
                self.module = Inner()

        class Outer(nn.Module):
            def __init__(self):
                super().__init__()
                self.foo = nn.Linear(1, 1)
                self.bar = nn.Linear(1, 1)
                self.module = Middle()

        # resulting model for overview:
        # Outer(
        #   (foo): Linear(...)
        #   (bar): Linear(...)
        #   (module): Middle(
        #     (foo): Linear(...)
        #     (foobar): Linear(...)
        #     (module): Inner(
        #       (foo): Linear(...)
        #       (barfoo): Linear(...)
        #     )
        #   )
        # )
        return Outer()

    def test_no_rank_nor_alpha_pattern(self, model):
        # sanity check the default case, no rank or alpha pattern
        config = LoraConfig(target_modules="all-linear")
        model = get_peft_model(model, config).base_model.model
        # r is the default rank and alpha, thus scaling is 1.0
        assert model.foo.r["default"] == 8
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.r["default"] == 8
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.r["default"] == 8
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.r["default"] == 8
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.r["default"] == 8
        assert model.module.module.foo.scaling["default"] == 1.0
        assert model.module.module.barfoo.r["default"] == 8
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_rank_and_alpha_pattern_no_matching_keys(self, model):
        # sanity check for non-matching keys, no rank or alpha pattern
        config = LoraConfig(target_modules="all-linear", rank_pattern={"bla": 4, "oof": 6}, alpha_pattern={"baz": 3})
        model = get_peft_model(model, config).base_model.model
        # r is the default rank and alpha, thus scaling is 1.0
        assert model.foo.r["default"] == 8
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.r["default"] == 8
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.r["default"] == 8
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.r["default"] == 8
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.r["default"] == 8
        assert model.module.module.foo.scaling["default"] == 1.0
        assert model.module.module.barfoo.r["default"] == 8
        assert model.module.module.barfoo.scaling["default"] == 1.0

    # below, we test all permutations for rank_pattern of targeting outer, middle, and inner foo layers:

    def test_rank_pattern_target_all(self, model):
        # unanchored "foo" matches the foo layer at every depth
        config = LoraConfig(target_modules="all-linear", rank_pattern={"foo": 16})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 16
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 16
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_outer(self, model):
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^foo": 16})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 8
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 8
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_middle(self, model):
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^module.foo": 16})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 8
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 16
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 8
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_inner(self, model):
        config = LoraConfig(target_modules="all-linear", rank_pattern={"module.module.foo": 16})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 8
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 8
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 16
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_inner_with_caret(self, model):
        # same as before, but using the caret in the regex should also work
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^module.module.foo": 16})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 8
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 8
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 16
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_middle_inner(self, model):
        # unanchored "module.foo" matches both the middle and the inner foo
        config = LoraConfig(target_modules="all-linear", rank_pattern={"module.foo": 16})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 8
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 16
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 16
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_middle_inner_different_ranks(self, model):
        # same layers targeted as in previous test, but with different ranks
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^module.foo": 16, "^module.module.foo": 24})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 8
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 16
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 24
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_outer_middle(self, model):
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^foo": 16, "^module.foo": 24})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 24
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 8
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_outer_inner(self, model):
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^foo": 16, "module.module.foo": 24})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 8
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 24
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_outer_inner_with_caret(self, model):
        # same as before, but using the caret in the regex should also work
        config = LoraConfig(target_modules="all-linear", rank_pattern={"^foo": 16, "^module.module.foo": 24})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 8
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 24
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_outer_middle_inner_with_caret(self, model):
        # indicate each layer with a different rank and use the caret in the regex
        config = LoraConfig(
            target_modules="all-linear", rank_pattern={"^foo": 16, "^module.foo": 24, "^module.module.foo": 32}
        )
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 24
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 32
        assert model.module.module.barfoo.r["default"] == 8

    def test_rank_pattern_target_outer_middle_inner_with_caret_dict_order(self, model):
        # same as before, but change the order of the rank_pattern dict
        config = LoraConfig(
            target_modules="all-linear", rank_pattern={"^module.module.foo": 32, "^module.foo": 24, "^foo": 16}
        )
        model = get_peft_model(model, config).base_model.model
        assert model.foo.r["default"] == 16
        assert model.bar.r["default"] == 8
        assert model.module.foo.r["default"] == 24
        assert model.module.foobar.r["default"] == 8
        assert model.module.module.foo.r["default"] == 32
        assert model.module.module.barfoo.r["default"] == 8

    # below, we test all permutations for alpha_pattern of targeting outer, middle, and inner foo layers:
    # these tests are analogous to the rank_pattern tests above
    # (scaling = alpha / r, so with default r=8: alpha 4 -> 0.5, alpha 2 -> 0.25, alpha 1 -> 0.125)

    def test_alpha_pattern_target_all(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"foo": 4})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.5
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.5
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_outer(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^foo": 4})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 1.0
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_middle(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^module.foo": 4})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.5
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 1.0
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_inner(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"module.module.foo": 4})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.5
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_inner_with_caret(self, model):
        # same as before, but using the caret in the regex should also work
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^module.module.foo": 4})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.5
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_middle_inner(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"module.foo": 4})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.5
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.5
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_middle_inner_different_alphas(self, model):
        # same layers targeted as in previous test, but with different alphas
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^module.foo": 4, "^module.module.foo": 2})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 1.0
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.5
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.25
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_outer_middle(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^foo": 4, "^module.foo": 2})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.25
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 1.0
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_outer_inner(self, model):
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^foo": 4, "module.module.foo": 2})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.25
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_outer_inner_with_caret(self, model):
        # same as before, but using the caret in the regex should also work
        config = LoraConfig(target_modules="all-linear", alpha_pattern={"^foo": 4, "^module.module.foo": 2})
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 1.0
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.25
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_outer_middle_inner_with_caret(self, model):
        # indicate each layer with a different alpha and use the caret in the regex
        config = LoraConfig(
            target_modules="all-linear", alpha_pattern={"^foo": 4, "^module.foo": 2, "^module.module.foo": 1}
        )
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.25
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.125
        assert model.module.module.barfoo.scaling["default"] == 1.0

    def test_alpha_pattern_target_outer_middle_inner_with_caret_dict_order(self, model):
        # same as before, but change the order of the alpha_pattern dict
        config = LoraConfig(
            target_modules="all-linear", alpha_pattern={"^module.module.foo": 1, "^module.foo": 2, "^foo": 4}
        )
        model = get_peft_model(model, config).base_model.model
        assert model.foo.scaling["default"] == 0.5
        assert model.bar.scaling["default"] == 1.0
        assert model.module.foo.scaling["default"] == 0.25
        assert model.module.foobar.scaling["default"] == 1.0
        assert model.module.module.foo.scaling["default"] == 0.125
        assert model.module.module.barfoo.scaling["default"] == 1.0
peft/tests/test_tuners_utils.py/0
{ "file_path": "peft/tests/test_tuners_utils.py", "repo_id": "peft", "token_count": 38830 }
250
*This guideline is very much a work-in-progress.* Contributions to `timm` for code, documentation, tests are more than welcome! There haven't been any formal guidelines to date so please bear with me, and feel free to add to this guide. # Coding style Code linting and auto-format (black) are not currently in place but open to consideration. In the meantime, the style to follow is (mostly) aligned with Google's guide: https://google.github.io/styleguide/pyguide.html. A few specific differences from Google style (or black) 1. Line length is 120 char. Going over is okay in some cases (e.g. I prefer not to break URL across lines). 2. Hanging indents are always preferred, please avoid aligning arguments with closing brackets or braces. Example, from Google guide, but this is a NO here: ``` # Aligned with opening delimiter. foo = long_function_name(var_one, var_two, var_three, var_four) meal = (spam, beans) # Aligned with opening delimiter in a dictionary. foo = { 'long_dictionary_key': value1 + value2, ... } ``` This is YES: ``` # 4-space hanging indent; nothing on first line, # closing parenthesis on a new line. foo = long_function_name( var_one, var_two, var_three, var_four ) meal = ( spam, beans, ) # 4-space hanging indent in a dictionary. foo = { 'long_dictionary_key': long_dictionary_value, ... } ``` When there is discrepancy in a given source file (there are many origins for various bits of code and not all have been updated to what I consider current goal), please follow the style in a given file. In general, if you add new code, formatting it with black using the following options should result in a style that is compatible with the rest of the code base: ``` black --skip-string-normalization --line-length 120 <path-to-file> ``` Avoid formatting code that is unrelated to your PR though. PR with pure formatting / style fixes will be accepted but only in isolation from functional changes, best to ask before starting such a change. 
# Documentation

As with code style, docstring style is based on the Google guide: https://google.github.io/styleguide/pyguide.html

The goal for the code is to eventually move to have all major functions and `__init__` methods use PEP484 type annotations.

When type annotations are used for a function, as per the Google pyguide, they should **NOT** be duplicated in the docstrings, please leave annotations as the one source of truth re typing.

There are a LOT of gaps in current documentation relative to the functionality in timm, please, document away!

# Installation

Create a Python virtual environment using Python 3.10. Inside the environment, install `torch` and `torchvision` using the instructions matching your system as listed on the [PyTorch website](https://pytorch.org/).

Then install the remaining dependencies:

```
python -m pip install -r requirements.txt
python -m pip install -r requirements-dev.txt  # for testing
python -m pip install -e .
```

## Unit tests

Run the tests using:

```
pytest tests/
```

Since the whole test suite takes a lot of time to run locally (a few hours), you may want to select a subset of tests relating to the changes you made by using the `-k` option of [`pytest`](https://docs.pytest.org/en/7.1.x/example/markers.html#using-k-expr-to-select-tests-based-on-their-name). Moreover, running tests in parallel (in this example 4 processes) with the `-n` option may help:

```
pytest -k "substring-to-match" -n 4 tests/
```

## Building documentation

Please refer to [this document](https://github.com/huggingface/pytorch-image-models/tree/main/hfdocs).

# Questions

If you have any questions about contribution, where / how to contribute, please ask in the [Discussions](https://github.com/huggingface/pytorch-image-models/discussions/categories/contributing) (there is a `Contributing` topic).
pytorch-image-models/CONTRIBUTING.md/0
{ "file_path": "pytorch-image-models/CONTRIBUTING.md", "repo_id": "pytorch-image-models", "token_count": 1223 }
251
# Sharing and Loading Models From the Hugging Face Hub The `timm` library has a built-in integration with the Hugging Face Hub, making it easy to share and load models from the ๐Ÿค— Hub. In this short guide, we'll see how to: 1. Share a `timm` model on the Hub 2. How to load that model back from the Hub ## Authenticating First, you'll need to make sure you have the `huggingface_hub` package installed. ```bash pip install huggingface_hub ``` Then, you'll need to authenticate yourself. You can do this by running the following command: ```bash huggingface-cli login ``` Or, if you're using a notebook, you can use the `notebook_login` helper: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Sharing a Model ```py >>> import timm >>> model = timm.create_model('resnet18', pretrained=True, num_classes=4) ``` Here is where you would normally train or fine-tune the model. We'll skip that for the sake of this tutorial. Let's pretend we've now fine-tuned the model. The next step would be to push it to the Hub! We can do this with the `timm.models.hub.push_to_hf_hub` function. ```py >>> model_cfg = dict(label_names=['a', 'b', 'c', 'd']) >>> timm.models.push_to_hf_hub(model, 'resnet18-random', model_config=model_cfg) ``` Running the above would push the model to `<your-username>/resnet18-random` on the Hub. You can now share this model with your friends, or use it in your own code! ## Loading a Model Loading a model from the Hub is as simple as calling `timm.create_model` with the `pretrained` argument set to the name of the model you want to load. In this case, we'll use [`nateraw/resnet18-random`](https://huggingface.co/nateraw/resnet18-random), which is the model we just pushed to the Hub. ```py >>> model_reloaded = timm.create_model('hf_hub:nateraw/resnet18-random', pretrained=True) ```
pytorch-image-models/hfdocs/source/hf_hub.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/hf_hub.mdx", "repo_id": "pytorch-image-models", "token_count": 593 }
252
# Ensemble Adversarial Inception ResNet v2

**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).

This particular model was trained for study of adversarial examples (adversarial training).

The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.inference_mode():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... 
print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `ens_adv_inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1804-00097, author = {Alexey Kurakin and Ian J. Goodfellow and Samy Bengio and Yinpeng Dong and Fangzhou Liao and Ming Liang and Tianyu Pang and Jun Zhu and Xiaolin Hu and Cihang Xie and Jianyu Wang and Zhishuai Zhang and Zhou Ren and Alan L. 
Yuille and Sangxia Huang and Yao Zhao and Yuzhe Zhao and Zhonglin Han and Junjiajia Long and Yerkebulan Berdibekov and Takuya Akiba and Seiya Tokui and Motoki Abe}, title = {Adversarial Attacks and Defences Competition}, journal = {CoRR}, volume = {abs/1804.00097}, year = {2018}, url = {http://arxiv.org/abs/1804.00097}, archivePrefix = {arXiv}, eprint = {1804.00097}, timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: Ensemble Adversarial Paper: Title: Adversarial Attacks and Defences Competition URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition Models: - Name: ens_adv_inception_resnet_v2 In Collection: Ensemble Adversarial Metadata: FLOPs: 16959133120 Parameters: 55850000 File Size: 223774238 Architecture: - 1x1 Convolution - Auxiliary Classifier - Average Pooling - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inception-v3 Module - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: ens_adv_inception_resnet_v2 Crop Pct: '0.897' Image Size: '299' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L351 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 1.0% Top 5 Accuracy: 17.32% -->
pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx", "repo_id": "pytorch-image-models", "token_count": 2212 }
253
# RexNet **Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. Authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6s](https://www.paperswithcode.com/method/relu6). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('rexnet_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... 
print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `rexnet_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('rexnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{han2020rexnet, title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network}, author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo}, year={2020}, eprint={2007.00992}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RexNet Paper: Title: 'ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network' URL: https://paperswithcode.com/paper/rexnet-diminishing-representational Models: - Name: rexnet_100 In Collection: RexNet Metadata: FLOPs: 509989377 Parameters: 4800000 File Size: 19417552 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_100 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L212 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.86% Top 5 Accuracy: 93.88% - Name: rexnet_130 In Collection: RexNet Metadata: FLOPs: 848364461 Parameters: 7560000 File Size: 30508197 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_130 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: 
'224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L218 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.49% Top 5 Accuracy: 94.67% - Name: rexnet_150 In Collection: RexNet Metadata: FLOPs: 1122374469 Parameters: 9730000 File Size: 39227315 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_150 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L224 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.31% Top 5 Accuracy: 95.16% - Name: rexnet_200 In Collection: RexNet Metadata: FLOPs: 1960224938 Parameters: 16370000 File Size: 65862221 Architecture: - Batch Normalization - Convolution - Dropout - ReLU6 - Residual Connection Tasks: - Image Classification Training Techniques: - Label Smoothing - Linear Warmup With Cosine Annealing - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: rexnet_200 LR: 0.5 Epochs: 400 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic Label Smoothing: 0.1 Code: 
https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L230 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.63% Top 5 Accuracy: 95.67% -->
pytorch-image-models/hfdocs/source/models/rexnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/rexnet.mdx", "repo_id": "pytorch-image-models", "token_count": 3087 }
254
# TResNet A **TResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that aim to boost accuracy while maintaining GPU training and inference efficiency. They contain several design tricks including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, Blocks selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tresnet_l', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... 
print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{ridnik2020tresnet, title={TResNet: High Performance GPU-Dedicated Architecture}, author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman}, year={2020}, eprint={2003.13630}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: TResNet Paper: Title: 'TResNet: High Performance GPU-Dedicated Architecture' URL: https://paperswithcode.com/paper/tresnet-high-performance-gpu-dedicated Models: - Name: tresnet_l In Collection: TResNet Metadata: FLOPs: 10873416792 Parameters: 53456696 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_l LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L267 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.49% Top 5 Accuracy: 95.62% - Name: tresnet_l_448 In Collection: TResNet Metadata: FLOPs: 43488238584 Parameters: 53456696 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_l_448 
LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L285 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.26% Top 5 Accuracy: 95.98% - Name: tresnet_m In Collection: TResNet Metadata: FLOPs: 5733048064 Parameters: 41282200 File Size: 125861314 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs Training Time: < 24 hours ID: tresnet_m LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L261 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.8% Top 5 Accuracy: 94.86% - Name: tresnet_m_448 In Collection: TResNet Metadata: FLOPs: 22929743104 Parameters: 29278464 File Size: 125861314 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_m_448 LR: 0.01 
Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L279 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.72% Top 5 Accuracy: 95.57% - Name: tresnet_xl In Collection: TResNet Metadata: FLOPs: 15162534034 Parameters: 75646610 File Size: 314378965 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_xl LR: 0.01 Epochs: 300 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L273 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.05% Top 5 Accuracy: 95.93% - Name: tresnet_xl_448 In Collection: TResNet Metadata: FLOPs: 60641712730 Parameters: 75646610 File Size: 224440219 Architecture: - 1x1 Convolution - Anti-Alias Downsampling - Convolution - Global Average Pooling - InPlace-ABN - Leaky ReLU - ReLU - Residual Connection - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - AutoAugment - Cutout - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA 100 GPUs ID: tresnet_xl_448 LR: 0.01 Epochs: 300 Crop Pct: '0.875' 
Momentum: 0.9 Image Size: '448' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L291 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.06% Top 5 Accuracy: 96.19% -->
pytorch-image-models/hfdocs/source/models/tresnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/tresnet.mdx", "repo_id": "pytorch-image-models", "token_count": 4203 }
255
import numpy as np
import pandas as pd

# Map of each base ("clean") results CSV to the list of test-set CSVs that
# should be annotated with deltas (rank / top1 / top5) relative to that base.
results = {
    'results-imagenet.csv': [
        'results-imagenet-real.csv',
        'results-imagenetv2-matched-frequency.csv',
        'results-sketch.csv'
    ],
    'results-imagenet-a-clean.csv': [
        'results-imagenet-a.csv',
    ],
    'results-imagenet-r-clean.csv': [
        'results-imagenet-r.csv',
    ],
}


def diff(base_df, test_csv):
    """Annotate the CSV at ``test_csv`` in place with deltas vs. ``base_df``.

    Reads ``test_csv``, computes per-model differences in rank, top-1 and
    top-5 accuracy against ``base_df`` (expected to be sorted by descending
    top1/top5, as ``main`` does), then rewrites ``test_csv`` sorted by its
    own top1/top5 with extra ``top1_diff``/``top5_diff``/``rank_diff``
    columns.

    Args:
        base_df (pd.DataFrame): baseline results; temporarily gains (and then
            drops) a helper 'mi' key column, otherwise left unchanged.
        test_csv (str): path of the results CSV to annotate and rewrite.
    """
    # Key models by name + image size so the same model evaluated at
    # different resolutions is matched correctly.
    base_df['mi'] = base_df.model + '-' + base_df.img_size.astype('str')
    base_models = base_df['mi'].values
    test_df = pd.read_csv(test_csv)
    test_df['mi'] = test_df.model + '-' + test_df.img_size.astype('str')
    test_models = test_df['mi'].values

    rank_diff = np.zeros_like(test_models, dtype='object')
    top1_diff = np.zeros_like(test_models, dtype='object')
    top5_diff = np.zeros_like(test_models, dtype='object')

    for rank, model in enumerate(test_models):
        if model in base_models:
            # [0][0] -> first match position; int() on the raw index array is
            # deprecated in NumPy and raises when there are multiple matches.
            base_rank = int(np.where(base_models == model)[0][0])
            # base_rank is POSITIONAL; base_df's index is permuted by the
            # caller's sort_values, so label-based base_df['top1'][base_rank]
            # would read the wrong row. Use .iloc throughout.
            top1_d = test_df['top1'].iloc[rank] - base_df['top1'].iloc[base_rank]
            top5_d = test_df['top5'].iloc[rank] - base_df['top5'].iloc[base_rank]

            # rank_diff: '+k' means the model moved up k places vs the base ranking
            if rank == base_rank:
                rank_diff[rank] = '0'
            elif rank > base_rank:
                rank_diff[rank] = f'-{rank - base_rank}'
            else:
                rank_diff[rank] = f'+{base_rank - rank}'

            # top1_diff: signed, fixed 3 decimal places
            if top1_d >= .0:
                top1_diff[rank] = f'+{top1_d:.3f}'
            else:
                top1_diff[rank] = f'-{abs(top1_d):.3f}'

            # top5_diff: same format as top1_diff
            if top5_d >= .0:
                top5_diff[rank] = f'+{top5_d:.3f}'
            else:
                top5_diff[rank] = f'-{abs(top5_d):.3f}'
        else:
            # model not present in base results; leave delta columns blank
            rank_diff[rank] = ''
            top1_diff[rank] = ''
            top5_diff[rank] = ''

    test_df['top1_diff'] = top1_diff
    test_df['top5_diff'] = top5_diff
    test_df['rank_diff'] = rank_diff
    # drop the helper key from both frames (base_df is reused by the caller)
    test_df.drop('mi', axis=1, inplace=True)
    base_df.drop('mi', axis=1, inplace=True)
    test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format)
    test_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True)
    test_df.to_csv(test_csv, index=False, float_format='%.3f')


def main():
    """Annotate every test CSV, then rewrite each base CSV sorted + formatted."""
    for base_results, test_results in results.items():
        base_df = pd.read_csv(base_results)
        base_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True)
        for test_csv in test_results:
            diff(base_df, test_csv)
        base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format)
        base_df.to_csv(base_results, index=False, float_format='%.3f')


if __name__ == '__main__':
    # Guard so importing this module does not immediately rewrite the CSVs.
    main()
pytorch-image-models/results/generate_csv_results.py/0
{ "file_path": "pytorch-image-models/results/generate_csv_results.py", "repo_id": "pytorch-image-models", "token_count": 1453 }
256
from .version import __version__ as __version__ from .layers import ( is_scriptable as is_scriptable, is_exportable as is_exportable, set_scriptable as set_scriptable, set_exportable as set_exportable, ) from .models import ( create_model as create_model, list_models as list_models, list_pretrained as list_pretrained, is_model as is_model, list_modules as list_modules, model_entrypoint as model_entrypoint, is_model_pretrained as is_model_pretrained, get_pretrained_cfg as get_pretrained_cfg, get_pretrained_cfg_value as get_pretrained_cfg_value, )
pytorch-image-models/timm/__init__.py/0
{ "file_path": "pytorch-image-models/timm/__init__.py", "repo_id": "pytorch-image-models", "token_count": 219 }
257
""" Mixup and Cutmix Papers: mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412) CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899) Code Reference: CutMix: https://github.com/clovaai/CutMix-PyTorch Hacked together by / Copyright 2019, Ross Wightman """ import numpy as np import torch def one_hot(x, num_classes, on_value=1., off_value=0.): x = x.long().view(-1, 1) return torch.full((x.size()[0], num_classes), off_value, device=x.device).scatter_(1, x, on_value) def mixup_target(target, num_classes, lam=1., smoothing=0.0): off_value = smoothing / num_classes on_value = 1. - smoothing + off_value y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value) y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value) return y1 * lam + y2 * (1. - lam) def rand_bbox(img_shape, lam, margin=0., count=None): """ Standard CutMix bounding-box Generates a random square bbox based on lambda value. This impl includes support for enforcing a border margin as percent of bbox dimensions. 
Args: img_shape (tuple): Image shape as tuple lam (float): Cutmix lambda value margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image) count (int): Number of bbox to generate """ ratio = np.sqrt(1 - lam) img_h, img_w = img_shape[-2:] cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) yl = np.clip(cy - cut_h // 2, 0, img_h) yh = np.clip(cy + cut_h // 2, 0, img_h) xl = np.clip(cx - cut_w // 2, 0, img_w) xh = np.clip(cx + cut_w // 2, 0, img_w) return yl, yh, xl, xh def rand_bbox_minmax(img_shape, minmax, count=None): """ Min-Max CutMix bounding-box Inspired by Darknet cutmix impl, generates a random rectangular bbox based on min/max percent values applied to each dimension of the input image. Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max. Args: img_shape (tuple): Image shape as tuple minmax (tuple or list): Min and max bbox ratios (as percent of image size) count (int): Number of bbox to generate """ assert len(minmax) == 2 img_h, img_w = img_shape[-2:] cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count) cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count) yl = np.random.randint(0, img_h - cut_h, size=count) xl = np.random.randint(0, img_w - cut_w, size=count) yu = yl + cut_h xu = xl + cut_w return yl, yu, xl, xu def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None): """ Generate bbox and apply lambda correction. """ if ratio_minmax is not None: yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count) else: yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count) if correct_lam or ratio_minmax is not None: bbox_area = (yu - yl) * (xu - xl) lam = 1. 
- bbox_area / float(img_shape[-2] * img_shape[-1]) return (yl, yu, xl, xu), lam class Mixup: """ Mixup/Cutmix that applies different params to each element or whole batch Args: mixup_alpha (float): mixup alpha value, mixup is active if > 0. cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0. cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None. prob (float): probability of applying mixup or cutmix per batch or element switch_prob (float): probability of switching to cutmix instead of mixup when both are active mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element) correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders label_smoothing (float): apply label smoothing to the mixed target tensor num_classes (int): number of classes for target """ def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5, mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000): self.mixup_alpha = mixup_alpha self.cutmix_alpha = cutmix_alpha self.cutmix_minmax = cutmix_minmax if self.cutmix_minmax is not None: assert len(self.cutmix_minmax) == 2 # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe self.cutmix_alpha = 1.0 self.mix_prob = prob self.switch_prob = switch_prob self.label_smoothing = label_smoothing self.num_classes = num_classes self.mode = mode self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop) def _params_per_elem(self, batch_size): lam = np.ones(batch_size, dtype=np.float32) use_cutmix = np.zeros(batch_size, dtype=bool) if self.mixup_enabled: if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.: use_cutmix = np.random.rand(batch_size) < self.switch_prob lam_mix = np.where( use_cutmix, np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size), np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)) elif self.mixup_alpha > 0.: lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size) elif self.cutmix_alpha > 0.: use_cutmix = np.ones(batch_size, dtype=bool) lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size) else: assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true." lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam) return lam, use_cutmix def _params_per_batch(self): lam = 1. use_cutmix = False if self.mixup_enabled and np.random.rand() < self.mix_prob: if self.mixup_alpha > 0. and self.cutmix_alpha > 0.: use_cutmix = np.random.rand() < self.switch_prob lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \ np.random.beta(self.mixup_alpha, self.mixup_alpha) elif self.mixup_alpha > 0.: lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha) elif self.cutmix_alpha > 0.: use_cutmix = True lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) else: assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true." 
lam = float(lam_mix) return lam, use_cutmix def _mix_elem(self, x): batch_size = len(x) lam_batch, use_cutmix = self._params_per_elem(batch_size) x_orig = x.clone() # need to keep an unmodified original for mixing source for i in range(batch_size): j = batch_size - i - 1 lam = lam_batch[i] if lam != 1.: if use_cutmix[i]: (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] lam_batch[i] = lam else: x[i] = x[i] * lam + x_orig[j] * (1 - lam) return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) def _mix_pair(self, x): batch_size = len(x) lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) x_orig = x.clone() # need to keep an unmodified original for mixing source for i in range(batch_size // 2): j = batch_size - i - 1 lam = lam_batch[i] if lam != 1.: if use_cutmix[i]: (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh] lam_batch[i] = lam else: x[i] = x[i] * lam + x_orig[j] * (1 - lam) x[j] = x[j] * lam + x_orig[i] * (1 - lam) lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) def _mix_batch(self, x): lam, use_cutmix = self._params_per_batch() if lam == 1.: return 1. if use_cutmix: (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh] else: x_flipped = x.flip(0).mul_(1. 
- lam) x.mul_(lam).add_(x_flipped) return lam def __call__(self, x, target): assert len(x) % 2 == 0, 'Batch size should be even when using this' if self.mode == 'elem': lam = self._mix_elem(x) elif self.mode == 'pair': lam = self._mix_pair(x) else: lam = self._mix_batch(x) target = mixup_target(target, self.num_classes, lam, self.label_smoothing) return x, target class FastCollateMixup(Mixup): """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch A Mixup impl that's performed while collating the batches. """ def _mix_elem_collate(self, output, batch, half=False): batch_size = len(batch) num_elem = batch_size // 2 if half else batch_size assert len(output) == num_elem lam_batch, use_cutmix = self._params_per_elem(num_elem) is_np = isinstance(batch[0][0], np.ndarray) for i in range(num_elem): j = batch_size - i - 1 lam = lam_batch[i] mixed = batch[i][0] if lam != 1.: if use_cutmix[i]: if not half: mixed = mixed.copy() if is_np else mixed.clone() (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam, ) mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] lam_batch[i] = lam else: if is_np: mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) np.rint(mixed, out=mixed) else: mixed = mixed.float() * lam + batch[j][0].float() * (1 - lam) torch.round(mixed, out=mixed) output[i] += torch.from_numpy(mixed.astype(np.uint8)) if is_np else mixed.byte() if half: lam_batch = np.concatenate((lam_batch, np.ones(num_elem))) return torch.tensor(lam_batch).unsqueeze(1) def _mix_pair_collate(self, output, batch): batch_size = len(batch) lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) is_np = isinstance(batch[0][0], np.ndarray) for i in range(batch_size // 2): j = batch_size - i - 1 lam = lam_batch[i] mixed_i = batch[i][0] mixed_j = batch[j][0] assert 0 <= lam <= 1.0 if lam < 1.: if use_cutmix[i]: (yl, yh, xl, xh), lam = 
cutmix_bbox_and_lam( output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam, ) patch_i = mixed_i[:, yl:yh, xl:xh].copy() if is_np else mixed_i[:, yl:yh, xl:xh].clone() mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh] mixed_j[:, yl:yh, xl:xh] = patch_i lam_batch[i] = lam else: if is_np: mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam) mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam) mixed_i = mixed_temp np.rint(mixed_j, out=mixed_j) np.rint(mixed_i, out=mixed_i) else: mixed_temp = mixed_i.float() * lam + mixed_j.float() * (1 - lam) mixed_j = mixed_j.float() * lam + mixed_i.float() * (1 - lam) mixed_i = mixed_temp torch.round(mixed_j, out=mixed_j) torch.round(mixed_i, out=mixed_i) output[i] += torch.from_numpy(mixed_i.astype(np.uint8)) if is_np else mixed_i.byte() output[j] += torch.from_numpy(mixed_j.astype(np.uint8)) if is_np else mixed_j.byte() lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) return torch.tensor(lam_batch).unsqueeze(1) def _mix_batch_collate(self, output, batch): batch_size = len(batch) lam, use_cutmix = self._params_per_batch() is_np = isinstance(batch[0][0], np.ndarray) if use_cutmix: (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam, ) for i in range(batch_size): j = batch_size - i - 1 mixed = batch[i][0] if lam != 1.: if use_cutmix: mixed = mixed.copy() if is_np else mixed.clone() # don't want to modify the original while iterating mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] else: if is_np: mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) np.rint(mixed, out=mixed) else: mixed = mixed.float() * lam + batch[j][0].float() * (1 - lam) torch.round(mixed, out=mixed) output[i] += torch.from_numpy(mixed.astype(np.uint8)) if is_np else mixed.byte() return lam def __call__(self, batch, _=None): batch_size = len(batch) assert 
batch_size % 2 == 0, 'Batch size should be even when using this' half = 'half' in self.mode if half: batch_size //= 2 output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) if self.mode == 'elem' or self.mode == 'half': lam = self._mix_elem_collate(output, batch, half=half) elif self.mode == 'pair': lam = self._mix_pair_collate(output, batch) else: lam = self._mix_batch_collate(output, batch) target = torch.tensor([b[1] for b in batch], dtype=torch.int64) target = mixup_target(target, self.num_classes, lam, self.label_smoothing) target = target[:batch_size] return output, target
pytorch-image-models/timm/data/mixup.py/0
{ "file_path": "pytorch-image-models/timm/data/mixup.py", "repo_id": "pytorch-image-models", "token_count": 8184 }
258
""" A dataset reader that reads single tarfile based datasets This reader can read datasets consisting if a single tarfile containing images. I am planning to deprecated it in favour of ParerImageInTar. Hacked together by / Copyright 2020 Ross Wightman """ import os import tarfile from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader def extract_tarinfo(tarfile, class_to_idx=None, sort=True): extensions = get_img_extensions(as_set=True) files = [] labels = [] for ti in tarfile.getmembers(): if not ti.isfile(): continue dirname, basename = os.path.split(ti.path) label = os.path.basename(dirname) ext = os.path.splitext(basename)[1] if ext.lower() in extensions: files.append(ti) labels.append(label) if class_to_idx is None: unique_labels = set(labels) sorted_labels = list(sorted(unique_labels, key=natural_key)) class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx] if sort: tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) return tarinfo_and_targets, class_to_idx class ReaderImageTar(Reader): """ Single tarfile dataset where classes are mapped to folders within tar NOTE: This class is being deprecated in favour of the more capable ReaderImageInTar that can operate on folders of tars or tars in tars. 
""" def __init__(self, root, class_map=''): super().__init__() class_to_idx = None if class_map: class_to_idx = load_class_map(class_map, root) assert os.path.isfile(root) self.root = root with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx) self.imgs = self.samples self.tarfile = None # lazy init in __getitem__ def __getitem__(self, index): if self.tarfile is None: self.tarfile = tarfile.open(self.root) tarinfo, target = self.samples[index] fileobj = self.tarfile.extractfile(tarinfo) return fileobj, target def __len__(self): return len(self.samples) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename
pytorch-image-models/timm/data/readers/reader_image_tar.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_tar.py", "repo_id": "pytorch-image-models", "token_count": 1071 }
259
""" Attention Pool 2D Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. Based on idea in CLIP by OpenAI, licensed Apache 2.0 https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py Hacked together by / Copyright 2021 Ross Wightman """ from typing import Optional, Union, Tuple import torch import torch.nn as nn from .config import use_fused_attn from .helpers import to_2tuple from .pos_embed import resample_abs_pos_embed from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding from .weight_init import trunc_normal_ class RotAttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from train varies widely and falls off dramatically. I'm not sure if there is a way around this... 
-RW """ fused_attn: torch.jit.Final[bool] def __init__( self, in_features: int, out_features: Optional[int] = None, ref_feat_size: Union[int, Tuple[int, int]] = 7, embed_dim: Optional[int] = None, head_dim: Optional[int] = 64, num_heads: Optional[int] = None, qkv_bias: bool = True, qkv_separate: bool = False, pool_type: str = 'token', class_token: bool = False, drop_rate: float = 0., ): super().__init__() assert pool_type in ('', 'token') self.embed_dim = embed_dim = embed_dim or in_features self.in_features = in_features self.out_features = out_features or in_features ref_feat_size = to_2tuple(ref_feat_size) if num_heads is not None: assert embed_dim % num_heads == 0 head_dim = embed_dim // num_heads else: assert embed_dim % head_dim == 0 num_heads = embed_dim // head_dim self.num_heads = num_heads self.head_dim = head_dim self.pool_type = pool_type.lower() self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() if class_token: self.cls_token = nn.Parameter(torch.zeros(1, embed_dim)) else: self.cls_token = None if qkv_separate: self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.qkv = None else: self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.drop = nn.Dropout(drop_rate) self.proj = nn.Linear(embed_dim, self.out_features) self.pos_embed = RotaryEmbedding(self.head_dim, in_pixels=False, ref_feat_shape=ref_feat_size) def init_weights(self, zero_init_last: bool = False): if self.qkv is None: in_features = self.q.in_features trunc_normal_(self.q.weight, std=in_features ** -0.5) nn.init.zeros_(self.q.bias) trunc_normal_(self.k.weight, std=in_features ** -0.5) nn.init.zeros_(self.k.bias) trunc_normal_(self.v.weight, std=in_features ** -0.5) nn.init.zeros_(self.v.bias) else: in_features = self.qkv.in_features trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def reset(self, 
num_classes: Optional[int] = None, pool_type: Optional[str] = None): # NOTE: this module is being used as a head, so need compatible reset() if pool_type is not None: assert pool_type in ('', 'token') self.pool_type = pool_type if num_classes is not None: self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity() self.out_features = num_classes if num_classes > 0 else self.embed_dim def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor: if self.pool_type == 'token': x = x[:, 0] else: # if not pooled, return spatial output without token x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2) return x def forward(self, x, pre_logits: bool = False): B, _, H, W = x.shape N = H * W x = x.flatten(2).transpose(1, 2) if self.cls_token is None: x = torch.cat([x.mean(1, keepdim=True), x], dim=1) else: x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1) if self.qkv is None: q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) else: x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x.unbind(0) rse, rce = self.pos_embed.get_embed((H, W)) q = torch.cat([q[:, :, :1, :], apply_rot_embed(q[:, :, 1:, :], rse, rce)], dim=2).type_as(v) k = torch.cat([k[:, :, :1, :], apply_rot_embed(k[:, :, 1:, :], rse, rce)], dim=2).type_as(v) if self.fused_attn: x = nn.functional.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, N + 1, -1) x = self.drop(x) if pre_logits: x = self._pool(x, H, W) return x x = self.proj(x) x = self._pool(x, H, W) return x class AttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. 
This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. It was based on impl in CLIP by OpenAI https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. """ fused_attn: torch.jit.Final[bool] def __init__( self, in_features: int, feat_size: Union[int, Tuple[int, int]] = 7, out_features: Optional[int] = None, embed_dim: Optional[int] = None, head_dim: Optional[int] = 64, num_heads: Optional[int] = None, qkv_bias: bool = True, qkv_separate: bool = False, pool_type: str = 'token', class_token: bool = False, drop_rate: float = 0., ): super().__init__() assert pool_type in ('', 'token') self.embed_dim = embed_dim = embed_dim or in_features self.in_features = in_features self.out_features = out_features or in_features if num_heads is not None: assert embed_dim % num_heads == 0 head_dim = embed_dim // num_heads else: assert embed_dim % head_dim == 0 num_heads = embed_dim // head_dim self.feat_size = to_2tuple(feat_size) self.seq_len = self.feat_size[0] * self.feat_size[1] self.num_heads = num_heads self.head_dim = head_dim self.pool_type = pool_type self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() if class_token: self.cls_token = nn.Parameter(torch.zeros(1, embed_dim)) else: self.cls_token = None if qkv_separate: self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias) self.qkv = None else: self.q = self.k = self.v = None self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.drop = nn.Dropout(drop_rate) self.proj = nn.Linear(embed_dim, self.out_features) self.pos_embed = nn.Parameter(torch.zeros(self.seq_len + 1, in_features)) self.init_weights() def init_weights(self, zero_init_last: bool = False): if self.qkv is None: in_features = 
self.q.in_features trunc_normal_(self.q.weight, std=in_features ** -0.5) nn.init.zeros_(self.q.bias) trunc_normal_(self.k.weight, std=in_features ** -0.5) nn.init.zeros_(self.k.bias) trunc_normal_(self.v.weight, std=in_features ** -0.5) nn.init.zeros_(self.v.bias) else: in_features = self.qkv.in_features trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) trunc_normal_(self.pos_embed, std=in_features ** -0.5) def reset(self, num_classes: Optional[int] = None, pool_type: Optional[str] = None): # NOTE: this module is being used as a head, so need compatible reset() if pool_type is not None: assert pool_type in ('', 'token') self.pool_type = pool_type if num_classes is not None: self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity() self.out_features = num_classes if num_classes > 0 else self.embed_dim def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor: if self.pool_type == 'token': x = x[:, 0] else: # if not pooled, return spatial output without token x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2) return x def forward(self, x, pre_logits: bool = False): B, _, H, W = x.shape N = H * W x = x.flatten(2).transpose(1, 2) if self.cls_token is None: x = torch.cat([x.mean(1, keepdim=True), x], dim=1) else: x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1) pos_embed = resample_abs_pos_embed(self.pos_embed.unsqueeze(0), (H, W), num_prefix_tokens=1) x = x + pos_embed if self.qkv is None: q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2) else: x = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x.unbind(0) if self.fused_attn: x = nn.functional.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-2, 
-1) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, N + 1, -1) x = self.drop(x) if pre_logits: x = self._pool(x, H, W) return x x = self.proj(x) x = self._pool(x, H, W) return x
pytorch-image-models/timm/layers/attention_pool2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/attention_pool2d.py", "repo_id": "pytorch-image-models", "token_count": 5737 }
260
""" EvoNorm in PyTorch Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 @inproceedings{NEURIPS2020, author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, booktitle = {Advances in Neural Information Processing Systems}, editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, pages = {13539--13550}, publisher = {Curran Associates, Inc.}, title = {Evolving Normalization-Activation Layers}, url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, volume = {33}, year = {2020} } An attempt at getting decent performing EvoNorms running in PyTorch. While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm in terms of memory usage and throughput on GPUs. I'm testing these modules on TPU w/ PyTorch XLA. Promising start but currently working around some issues with builtin torch/tensor.var/std. Unlike GPU, similar train speeds for EvoNormS variants and BatchNorm. 
Hacked together by / Copyright 2020 Ross Wightman """ from typing import Sequence, Union import torch import torch.nn as nn import torch.nn.functional as F from .create_act import create_act_layer from .trace_utils import _assert def instance_std(x, eps: float = 1e-5): std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) return std.expand(x.shape) def instance_std_tpu(x, eps: float = 1e-5): std = manual_var(x, dim=(2, 3)).add(eps).sqrt() return std.expand(x.shape) # instance_std = instance_std_tpu def instance_rms(x, eps: float = 1e-5): rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) return rms.expand(x.shape) def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): xm = x.mean(dim=dim, keepdim=True) if diff_sqm: # difference of squared mean and mean squared, faster on TPU can be less stable var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) else: var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) return var def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): B, C, H, W = x.shape x_dtype = x.dtype _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) else: x = x.reshape(B, groups, C // groups, H, W) std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) return std.expand(x.shape).reshape(B, C, H, W) def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): # This is a workaround for some stability / odd behaviour of .var and .std # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results B, C, H, W = x.shape _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues var = manual_var(x, dim=-1, diff_sqm=diff_sqm) else: x = x.reshape(B, groups, C // groups, H, W) var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) #group_std = group_std_tpu # FIXME TPU temporary def group_rms(x, groups: int = 32, eps: float = 1e-5): B, C, H, W = x.shape _assert(C % groups == 0, '') x_dtype = x.dtype x = x.reshape(B, groups, C // groups, H, W) rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) return rms.expand(x.shape).reshape(B, C, H, W) class EvoNorm2dB0(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) # var = manual_var(x, dim=(0, 2, 3)).squeeze() n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach() * self.momentum * (n / (n - 1))) else: var = self.running_var left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) v = self.v.to(x_dtype).view(v_shape) right = x * v + instance_std(x, self.eps) x = x / left.max(right) return x * 
self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) class EvoNorm2dB1(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = (x + 1) * instance_rms(x, self.eps) x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dB2(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + 
var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = instance_rms(x, self.eps) - x x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0a(EvoNorm2dS0): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) d = group_std(x, self.groups, self.eps) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() x = x / d return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): 
super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.pre_act_norm = False self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1a(EvoNorm2dS1): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) 
self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2a(EvoNorm2dS2): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
pytorch-image-models/timm/layers/evo_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/evo_norm.py", "repo_id": "pytorch-image-models", "token_count": 6684 }
261
""" Median Pool Hacked together by / Copyright 2020 Ross Wightman """ import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, to_4tuple class MedianPool2d(nn.Module): """ Median pool (usable as median filter when stride=1) module. Args: kernel_size: size of pooling kernel, int or 2-tuple stride: pool stride, int or 2-tuple padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad same: override padding and enforce same padding, boolean """ def __init__(self, kernel_size=3, stride=1, padding=0, same=False): super(MedianPool2d, self).__init__() self.k = to_2tuple(kernel_size) self.stride = to_2tuple(stride) self.padding = to_4tuple(padding) # convert to l, r, t, b self.same = same def _padding(self, x): if self.same: ih, iw = x.size()[2:] if ih % self.stride[0] == 0: ph = max(self.k[0] - self.stride[0], 0) else: ph = max(self.k[0] - (ih % self.stride[0]), 0) if iw % self.stride[1] == 0: pw = max(self.k[1] - self.stride[1], 0) else: pw = max(self.k[1] - (iw % self.stride[1]), 0) pl = pw // 2 pr = pw - pl pt = ph // 2 pb = ph - pt padding = (pl, pr, pt, pb) else: padding = self.padding return padding def forward(self, x): x = F.pad(x, self._padding(x), mode='reflect') x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] return x
pytorch-image-models/timm/layers/median_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/median_pool.py", "repo_id": "pytorch-image-models", "token_count": 883 }
262
""" Depthwise Separable Conv Modules Basic DWS convs. Other variations of DWS exist with batch norm or activations between the DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. Hacked together by / Copyright 2020 Ross Wightman """ from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class SeparableConvNormAct(nn.Module): """ Separable Conv w/ trailing Norm and Activation """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, apply_act=True, drop_layer=None): super(SeparableConvNormAct, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) norm_act_layer = get_norm_act_layer(norm_layer, act_layer) norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) x = self.bn(x) return x SeparableConvBnAct = SeparableConvNormAct class SeparableConv2d(nn.Module): """ Separable Conv """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1): super(SeparableConv2d, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), 
out_channels, pw_kernel_size, padding=padding, bias=bias) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) return x
pytorch-image-models/timm/layers/separable_conv.py/0
{ "file_path": "pytorch-image-models/timm/layers/separable_conv.py", "repo_id": "pytorch-image-models", "token_count": 1138 }
263
"""Helpers for resolving pretrained configs, loading pretrained weights, and building models."""
import dataclasses
import logging
import os
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union

from torch import nn as nn
from torch.hub import load_state_dict_from_url

from timm.models._features import FeatureListNet, FeatureDictNet, FeatureHookNet, FeatureGetterNet
from timm.models._features_fx import FeatureGraphNet
from timm.models._helpers import load_state_dict
from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf, \
    load_state_dict_from_path, load_custom_from_hf
from timm.models._manipulate import adapt_input_conv
from timm.models._pretrained import PretrainedCfg
from timm.models._prune import adapt_model_from_file
from timm.models._registry import get_pretrained_cfg

_logger = logging.getLogger(__name__)

# Global variables for rarely used pretrained checkpoint download progress and hash check.
# Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle.
_DOWNLOAD_PROGRESS = False
_CHECK_HASH = False
# env toggle: prefer previously-cached torch.hub url downloads over hf-hub when both exist
_USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0

__all__ = [
    'set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained',
    'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg',
]

ModelT = TypeVar("ModelT", bound=nn.Module)  # any subclass of nn.Module


def _resolve_pretrained_source(pretrained_cfg: Dict[str, Any]) -> Tuple[str, str]:
    """Decide where pretrained weights should be loaded from.

    Returns a ``(load_from, pretrained_loc)`` pair where ``load_from`` is one of
    '', 'state_dict', 'file', 'url', 'hf-hub', 'local-dir' and ``pretrained_loc``
    is the matching location (dict, path, url, hub id, or (hub_id, filename) tuple).
    An empty ``load_from`` means no weight source could be resolved.
    """
    cfg_source = pretrained_cfg.get('source', '')
    pretrained_url = pretrained_cfg.get('url', None)
    pretrained_file = pretrained_cfg.get('file', None)
    pretrained_sd = pretrained_cfg.get('state_dict', None)
    hf_hub_id = pretrained_cfg.get('hf_hub_id', None)

    # resolve where to load pretrained weights from
    load_from = ''
    pretrained_loc = ''
    if cfg_source == 'hf-hub' and has_hf_hub(necessary=True):
        # hf-hub specified as source via model identifier
        load_from = 'hf-hub'
        assert hf_hub_id
        pretrained_loc = hf_hub_id
    elif cfg_source == 'local-dir':
        load_from = 'local-dir'
        pretrained_loc = pretrained_file
    else:
        # default source == timm or unspecified
        if pretrained_sd:
            # direct state_dict pass through is the highest priority
            load_from = 'state_dict'
            pretrained_loc = pretrained_sd
            assert isinstance(pretrained_loc, dict)
        elif pretrained_file:
            # file load override is the second-highest priority if set
            load_from = 'file'
            pretrained_loc = pretrained_file
        else:
            old_cache_valid = False
            if _USE_OLD_CACHE:
                # prioritized old cached weights if exists and env var enabled
                old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False
            if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True):
                # hf-hub available as alternate weight source in default_cfg
                load_from = 'hf-hub'
                pretrained_loc = hf_hub_id
            elif pretrained_url:
                load_from = 'url'
                pretrained_loc = pretrained_url

    if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None):
        # if a filename override is set, return tuple for location w/ (hub_id, filename)
        pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename']
    return load_from, pretrained_loc


def set_pretrained_download_progress(enable: bool = True) -> None:
    """ Set download progress for pretrained weights on/off (globally). """
    global _DOWNLOAD_PROGRESS
    _DOWNLOAD_PROGRESS = enable


def set_pretrained_check_hash(enable: bool = True) -> None:
    """ Set hash checking for pretrained weights on/off (globally). """
    global _CHECK_HASH
    _CHECK_HASH = enable


def load_custom_pretrained(
        model: nn.Module,
        pretrained_cfg: Optional[Dict[str, Any]] = None,
        load_fn: Optional[Callable] = None,
        cache_dir: Optional[Union[str, Path]] = None,
) -> None:
    """Loads a custom (read non .pth) weight file

    Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
    a passed in custom load fun, or the `load_pretrained` model member fn.

    If the object is already present in `model_dir`, it's deserialized and returned.
    The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
    `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.

    Args:
        model: The instantiated model to load weights into
        pretrained_cfg: Default pretrained model cfg
        load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named
            'load_pretrained' on the model will be called if it exists
        cache_dir: Override model checkpoint cache dir for this load
    """
    pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
    if not pretrained_cfg:
        # nothing to do without a cfg; warn instead of raising since this is a best-effort loader
        _logger.warning("Invalid pretrained config, cannot load weights.")
        return

    load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
    if not load_from:
        _logger.warning("No pretrained weights exist for this model. Using random initialization.")
        return
    if load_from == 'hf-hub':
        _logger.warning("Hugging Face hub not currently supported for custom load pretrained models.")
    elif load_from == 'url':
        pretrained_loc = download_cached_file(
            pretrained_loc,
            check_hash=_CHECK_HASH,
            progress=_DOWNLOAD_PROGRESS,
            cache_dir=cache_dir,
        )

    if load_fn is not None:
        load_fn(model, pretrained_loc)
    elif hasattr(model, 'load_pretrained'):
        model.load_pretrained(pretrained_loc)
    else:
        _logger.warning("Valid function to load pretrained weights is not available, using random initialization.")


def load_pretrained(
        model: nn.Module,
        pretrained_cfg: Optional[Dict[str, Any]] = None,
        num_classes: int = 1000,
        in_chans: int = 3,
        filter_fn: Optional[Callable] = None,
        strict: bool = True,
        cache_dir: Optional[Union[str, Path]] = None,
) -> None:
    """ Load pretrained checkpoint

    Args:
        model: PyTorch module
        pretrained_cfg: Configuration for pretrained weights / target dataset
        num_classes: Number of classes for target model. Will adapt pretrained if different.
        in_chans: Number of input chans for target model. Will adapt pretrained if different.
        filter_fn: state_dict filter fn for load (takes state_dict, model as args)
        strict: Strict load of checkpoint
        cache_dir: Override model checkpoint cache dir for this load

    Raises:
        RuntimeError: If no pretrained cfg is available or no weight source resolves.
    """
    pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
    if not pretrained_cfg:
        raise RuntimeError("Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.")

    load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
    if load_from == 'state_dict':
        _logger.info(f'Loading pretrained weights from state dict')
        state_dict = pretrained_loc  # pretrained_loc is the actual state dict for this override
    elif load_from == 'file':
        _logger.info(f'Loading pretrained weights from file ({pretrained_loc})')
        if pretrained_cfg.get('custom_load', False):
            # model handles its own (non state_dict) checkpoint format
            model.load_pretrained(pretrained_loc)
            return
        else:
            state_dict = load_state_dict(pretrained_loc)
    elif load_from == 'url':
        _logger.info(f'Loading pretrained weights from url ({pretrained_loc})')
        if pretrained_cfg.get('custom_load', False):
            pretrained_loc = download_cached_file(
                pretrained_loc,
                progress=_DOWNLOAD_PROGRESS,
                check_hash=_CHECK_HASH,
                cache_dir=cache_dir,
            )
            model.load_pretrained(pretrained_loc)
            return
        else:
            try:
                state_dict = load_state_dict_from_url(
                    pretrained_loc,
                    map_location='cpu',
                    progress=_DOWNLOAD_PROGRESS,
                    check_hash=_CHECK_HASH,
                    weights_only=True,
                    model_dir=cache_dir,
                )
            except TypeError:
                # fall back for older torch without the weights_only kwarg
                state_dict = load_state_dict_from_url(
                    pretrained_loc,
                    map_location='cpu',
                    progress=_DOWNLOAD_PROGRESS,
                    check_hash=_CHECK_HASH,
                    model_dir=cache_dir,
                )
    elif load_from == 'hf-hub':
        _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})')
        if isinstance(pretrained_loc, (list, tuple)):
            # (hub_id, filename) tuple from an hf_hub_filename override
            custom_load = pretrained_cfg.get('custom_load', False)
            if isinstance(custom_load, str) and custom_load == 'hf':
                load_custom_from_hf(*pretrained_loc, model, cache_dir=cache_dir)
                return
            else:
                state_dict = load_state_dict_from_hf(*pretrained_loc, cache_dir=cache_dir)
        else:
            state_dict = load_state_dict_from_hf(pretrained_loc, weights_only=True, cache_dir=cache_dir)
    elif load_from == 'local-dir':
        _logger.info(f'Loading pretrained weights from local directory ({pretrained_loc})')
        pretrained_path = Path(pretrained_loc)
        if pretrained_path.is_dir():
            state_dict = load_state_dict_from_path(pretrained_path)
        else:
            raise RuntimeError(f"Specified path is not a directory: {pretrained_loc}")
    else:
        model_name = pretrained_cfg.get('architecture', 'this model')
        raise RuntimeError(f"No pretrained weights exist for {model_name}. Use `pretrained=False` for random init.")

    if filter_fn is not None:
        try:
            state_dict = filter_fn(state_dict, model)
        except TypeError as e:
            # for backwards compat with filter fn that take one arg
            state_dict = filter_fn(state_dict)

    # adapt first-conv weights when the target model's input channels differ from pretrained (3)
    input_convs = pretrained_cfg.get('first_conv', None)
    if input_convs is not None and in_chans != 3:
        if isinstance(input_convs, str):
            input_convs = (input_convs,)
        for input_conv_name in input_convs:
            weight_name = input_conv_name + '.weight'
            try:
                state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
                _logger.info(
                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
            except NotImplementedError as e:
                # conversion unsupported; drop the weight and relax strictness so load can proceed
                del state_dict[weight_name]
                strict = False
                _logger.warning(
                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')

    # adapt / drop classifier weights when class count or label offset differs
    classifiers = pretrained_cfg.get('classifier', None)
    label_offset = pretrained_cfg.get('label_offset', 0)
    if classifiers is not None:
        if isinstance(classifiers, str):
            classifiers = (classifiers,)
        if num_classes != pretrained_cfg['num_classes']:
            for classifier_name in classifiers:
                # completely discard fully connected if model num_classes doesn't match pretrained weights
                state_dict.pop(classifier_name + '.weight', None)
                state_dict.pop(classifier_name + '.bias', None)
            strict = False
        elif label_offset > 0:
            for classifier_name in classifiers:
                # special case for pretrained weights with an extra background class in pretrained weights
                classifier_weight = state_dict[classifier_name + '.weight']
                state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
                classifier_bias = state_dict[classifier_name + '.bias']
                state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]

    load_result = model.load_state_dict(state_dict, strict=strict)
    if load_result.missing_keys:
        _logger.info(
            f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.'
            f' This is expected if model is being adapted.')
    if load_result.unexpected_keys:
        _logger.warning(
            f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.'
            f' This may be expected if model is being adapted.')


def pretrained_cfg_for_features(pretrained_cfg: Dict[str, Any]) -> Dict[str, Any]:
    """Return a copy of ``pretrained_cfg`` with classification-only fields removed."""
    pretrained_cfg = deepcopy(pretrained_cfg)
    # remove default pretrained cfg fields that don't have much relevance for feature backbone
    to_remove = ('num_classes', 'classifier', 'global_pool')  # add default final pool size?
    for tr in to_remove:
        pretrained_cfg.pop(tr, None)
    return pretrained_cfg


def _filter_kwargs(kwargs: Dict[str, Any], names: List[str]) -> None:
    """Remove the given keys from ``kwargs`` in-place (missing keys ignored)."""
    if not kwargs or not names:
        return
    for n in names:
        kwargs.pop(n, None)


def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter) -> None:
    """ Update the default_cfg and kwargs before passing to model

    Args:
        pretrained_cfg: input pretrained cfg (updated in-place)
        kwargs: keyword args passed to model build fn (updated in-place)
        kwargs_filter: keyword arg keys that must be removed before model __init__
    """
    # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
    default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
    if pretrained_cfg.get('fixed_input_size', False):
        # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
        default_kwarg_names += ('img_size',)

    for n in default_kwarg_names:
        # for legacy reasons, model __init__args uses img_size + in_chans as separate args while
        # pretrained_cfg has one input_size=(C, H ,W) entry
        if n == 'img_size':
            input_size = pretrained_cfg.get('input_size', None)
            if input_size is not None:
                assert len(input_size) == 3
                kwargs.setdefault(n, input_size[-2:])
        elif n == 'in_chans':
            input_size = pretrained_cfg.get('input_size', None)
            if input_size is not None:
                assert len(input_size) == 3
                kwargs.setdefault(n, input_size[0])
        elif n == 'num_classes':
            default_val = pretrained_cfg.get(n, None)
            # if default is < 0, don't pass through to model
            if default_val is not None and default_val >= 0:
                kwargs.setdefault(n, pretrained_cfg[n])
        else:
            default_val = pretrained_cfg.get(n, None)
            if default_val is not None:
                kwargs.setdefault(n, pretrained_cfg[n])

    # Filter keyword args for task specific model variants (some 'features only' models, etc.)
    _filter_kwargs(kwargs, names=kwargs_filter)


def resolve_pretrained_cfg(
        variant: str,
        pretrained_cfg: Optional[Union[str, Dict[str, Any]]] = None,
        pretrained_cfg_overlay: Optional[Dict[str, Any]] = None,
) -> PretrainedCfg:
    """Resolve pretrained configuration from various sources.

    ``pretrained_cfg`` may be a full dict (validated into a PretrainedCfg), a tag string
    (looked up as ``variant.tag`` in the registry), or None (registry lookup by variant).
    ``pretrained_cfg_overlay`` entries override the resolved cfg's fields.
    """
    model_with_tag = variant
    pretrained_tag = None
    if pretrained_cfg:
        if isinstance(pretrained_cfg, dict):
            # pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg
            pretrained_cfg = PretrainedCfg(**pretrained_cfg)
        elif isinstance(pretrained_cfg, str):
            pretrained_tag = pretrained_cfg
            pretrained_cfg = None

    # fallback to looking up pretrained cfg in model registry by variant identifier
    if not pretrained_cfg:
        if pretrained_tag:
            model_with_tag = '.'.join([variant, pretrained_tag])
        pretrained_cfg = get_pretrained_cfg(model_with_tag)

    if not pretrained_cfg:
        _logger.warning(
            f"No pretrained configuration specified for {model_with_tag} model. Using a default."
            f" Please add a config to the model pretrained_cfg registry or pass explicitly.")
        pretrained_cfg = PretrainedCfg()  # instance with defaults

    pretrained_cfg_overlay = pretrained_cfg_overlay or {}
    if not pretrained_cfg.architecture:
        # ensure architecture field is always populated with the variant name
        pretrained_cfg_overlay.setdefault('architecture', variant)
    pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay)

    return pretrained_cfg


def build_model_with_cfg(
        model_cls: Union[Type[ModelT], Callable[..., ModelT]],
        variant: str,
        pretrained: bool,
        pretrained_cfg: Optional[Dict] = None,
        pretrained_cfg_overlay: Optional[Dict] = None,
        model_cfg: Optional[Any] = None,
        feature_cfg: Optional[Dict] = None,
        pretrained_strict: bool = True,
        pretrained_filter_fn: Optional[Callable] = None,
        cache_dir: Optional[Union[str, Path]] = None,
        kwargs_filter: Optional[Tuple[str]] = None,
        **kwargs,
) -> ModelT:
    """ Build model with specified default_cfg and optional model_cfg

    This helper fn aids in the construction of a model including:
      * handling default_cfg and associated pretrained weight loading
      * passing through optional model_cfg for models with config based arch spec
      * features_only model adaptation
      * pruning config / model adaptation

    Args:
        model_cls: Model class
        variant: Model variant name
        pretrained: Load the pretrained weights
        pretrained_cfg: Model's pretrained weight/task config
        pretrained_cfg_overlay: Entries that will override those in pretrained_cfg
        model_cfg: Model's architecture config
        feature_cfg: Feature extraction adapter config
        pretrained_strict: Load pretrained weights strictly
        pretrained_filter_fn: Filter callable for pretrained weights
        cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints
        kwargs_filter: Kwargs keys to filter (remove) before passing to model
        **kwargs: Model args passed through to model __init__

    Returns:
        The constructed model, optionally wrapped in a feature extraction module.
    """
    pruned = kwargs.pop('pruned', False)
    features = False
    feature_cfg = feature_cfg or {}

    # resolve and update model pretrained config and model kwargs
    pretrained_cfg = resolve_pretrained_cfg(
        variant,
        pretrained_cfg=pretrained_cfg,
        pretrained_cfg_overlay=pretrained_cfg_overlay
    )
    pretrained_cfg = pretrained_cfg.to_dict()

    _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter)

    # Setup for feature extraction wrapper done at end of this fn
    if kwargs.pop('features_only', False):
        features = True
        feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
        if 'out_indices' in kwargs:
            feature_cfg['out_indices'] = kwargs.pop('out_indices')
        if 'feature_cls' in kwargs:
            feature_cfg['feature_cls'] = kwargs.pop('feature_cls')

    # Instantiate the model
    if model_cfg is None:
        model = model_cls(**kwargs)
    else:
        model = model_cls(cfg=model_cfg, **kwargs)
    model.pretrained_cfg = pretrained_cfg
    model.default_cfg = model.pretrained_cfg  # alias for backwards compat

    if pruned:
        model = adapt_model_from_file(model, variant)

    # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
    num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
    if pretrained:
        load_pretrained(
            model,
            pretrained_cfg=pretrained_cfg,
            num_classes=num_classes_pretrained,
            in_chans=kwargs.get('in_chans', 3),
            filter_fn=pretrained_filter_fn,
            strict=pretrained_strict,
            cache_dir=cache_dir,
        )

    # Wrap the model in a feature extraction module if enabled
    if features:
        use_getter = False
        if 'feature_cls' in feature_cfg:
            feature_cls = feature_cfg.pop('feature_cls')
            if isinstance(feature_cls, str):
                feature_cls = feature_cls.lower()

                # flatten_sequential only valid for some feature extractors
                if feature_cls not in ('dict', 'list', 'hook'):
                    feature_cfg.pop('flatten_sequential', None)

                if 'hook' in feature_cls:
                    feature_cls = FeatureHookNet
                elif feature_cls == 'list':
                    feature_cls = FeatureListNet
                elif feature_cls == 'dict':
                    feature_cls = FeatureDictNet
                elif feature_cls == 'fx':
                    feature_cls = FeatureGraphNet
                elif feature_cls == 'getter':
                    use_getter = True
                    feature_cls = FeatureGetterNet
                else:
                    assert False, f'Unknown feature class {feature_cls}'
        else:
            feature_cls = FeatureListNet

        output_fmt = getattr(model, 'output_fmt', None)
        if output_fmt is not None and not use_getter:  # don't set default for intermediate feat getter
            feature_cfg.setdefault('output_fmt', output_fmt)

        model = feature_cls(model, **feature_cfg)
        model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg)  # add back pretrained cfg
        model.default_cfg = model.pretrained_cfg  # alias for rename backwards compat (default_cfg -> pretrained_cfg)

    return model
pytorch-image-models/timm/models/_builder.py/0
{ "file_path": "pytorch-image-models/timm/models/_builder.py", "repo_id": "pytorch-image-models", "token_count": 9114 }
264
""" Model Registry
Hacked together by / Copyright 2020 Ross Wightman
"""

import fnmatch
import re
import sys
import warnings
from collections import defaultdict, deque
from copy import deepcopy
from dataclasses import replace
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple

from ._pretrained import PretrainedCfg, DefaultCfg

__all__ = [
    'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs',
    'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules',
    'get_pretrained_cfg_value', 'is_model_pretrained', 'get_arch_pretrained_cfgs'
]

_module_to_models: Dict[str, Set[str]] = defaultdict(set)  # dict of sets to check membership of model in module
_model_to_module: Dict[str, str] = {}  # mapping of model names to module names
_model_entrypoints: Dict[str, Callable[..., Any]] = {}  # mapping of model names to architecture entrypoint fns
_model_has_pretrained: Set[str] = set()  # set of model names that have pretrained weight url present
_model_default_cfgs: Dict[str, PretrainedCfg] = {}  # central repo for model arch -> default cfg objects
_model_pretrained_cfgs: Dict[str, PretrainedCfg] = {}  # central repo for model arch.tag -> pretrained cfgs
_model_with_tags: Dict[str, List[str]] = defaultdict(list)  # shortcut to map each model arch to all model + tag names
_module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict)  # per-module deprecation maps
_deprecated_models: Dict[str, Optional[str]] = {}  # deprecated name -> replacement name (or None if removed)


def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]:
    """Split 'arch.tag' into (arch, tag); ``no_tag`` is returned when no tag present."""
    model_name, *tag_list = model_name.split('.', 1)
    tag = tag_list[0] if tag_list else no_tag
    return model_name, tag


def get_arch_name(model_name: str) -> str:
    """Return just the architecture portion of a possibly tagged model name."""
    return split_model_name_tag(model_name)[0]


def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):
    """Group per-tag pretrained cfgs keyed by 'arch.tag' into DefaultCfg objects per arch.

    Tag priority: an untagged cfg with weights, or a tag ending in '*', is promoted to the
    default (front of the tag deque); the '*' marker is stripped from the stored tag.
    """
    out = defaultdict(DefaultCfg)
    default_set = set()  # no tag and tags ending with * are prioritized as default

    for k, v in cfgs.items():
        if isinstance(v, dict):
            v = PretrainedCfg(**v)
        has_weights = v.has_weights

        model, tag = split_model_name_tag(k)
        is_default_set = model in default_set
        priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)
        tag = tag.strip('*')

        default_cfg = out[model]

        if priority:
            default_cfg.tags.appendleft(tag)
            default_set.add(model)
        elif has_weights and not default_cfg.is_pretrained:
            # first weighted tag becomes default when no explicit priority was flagged
            default_cfg.tags.appendleft(tag)
        else:
            default_cfg.tags.append(tag)

        if has_weights:
            default_cfg.is_pretrained = True

        default_cfg.cfgs[tag] = v

    return out


def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:
    """Decorator that registers a model entrypoint fn and its default cfgs in the global registry.

    Side effects: appends the fn name to its module's ``__all__``, records module membership,
    and expands any matching entry in the module's ``default_cfgs`` into per-tag pretrained cfgs.
    """
    # lookup containing module
    mod = sys.modules[fn.__module__]
    module_name_split = fn.__module__.split('.')
    module_name = module_name_split[-1] if len(module_name_split) else ''

    # add model to __all__ in module
    model_name = fn.__name__
    if hasattr(mod, '__all__'):
        mod.__all__.append(model_name)
    else:
        mod.__all__ = [model_name]  # type: ignore

    # add entries to registry dict/sets
    if model_name in _model_entrypoints:
        warnings.warn(
            f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being '
            'registered conflicts with an existing name. Please check if this is not expected.',
            stacklevel=2,
        )
    _model_entrypoints[model_name] = fn
    _model_to_module[model_name] = module_name
    _module_to_models[module_name].add(model_name)

    if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
        # this will catch all models that have entrypoint matching cfg key, but miss any aliasing
        # entrypoints or non-matching combos
        default_cfg = mod.default_cfgs[model_name]
        if not isinstance(default_cfg, DefaultCfg):
            # new style default cfg dataclass w/ multiple entries per model-arch
            assert isinstance(default_cfg, dict)
            # old style cfg dict per model-arch
            pretrained_cfg = PretrainedCfg(**default_cfg)
            default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})

        for tag_idx, tag in enumerate(default_cfg.tags):
            is_default = tag_idx == 0  # first tag in the deque is the arch default
            pretrained_cfg = default_cfg.cfgs[tag]
            model_name_tag = '.'.join([model_name, tag]) if tag else model_name
            replace_items = dict(architecture=model_name, tag=tag if tag else None)
            if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':
                # auto-complete hub name w/ architecture.tag
                replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag
            pretrained_cfg = replace(pretrained_cfg, **replace_items)

            if is_default:
                _model_pretrained_cfgs[model_name] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    # add tagless entry if it's default and has weights
                    _model_has_pretrained.add(model_name)

            if tag:
                _model_pretrained_cfgs[model_name_tag] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    # add model w/ tag if tag is valid
                    _model_has_pretrained.add(model_name_tag)
                _model_with_tags[model_name].append(model_name_tag)
            else:
                _model_with_tags[model_name].append(model_name)  # has empty tag (to slowly remove these instances)

        _model_default_cfgs[model_name] = default_cfg

    return fn


def _deprecated_model_shim(deprecated_name: str, current_fn: Callable = None, current_tag: str = ''):
    """Return an entrypoint fn that warns about a deprecated name and forwards to its replacement."""
    def _fn(pretrained=False, **kwargs):
        assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.'
        current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__
        warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2)
        pretrained_cfg = kwargs.pop('pretrained_cfg', None)
        return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs)
    return _fn


def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):
    """Register deprecated model names for a module, installing warning shims as entrypoints.

    ``deprecation_map`` maps old names to replacement 'arch.tag' names (None = removed).
    """
    mod = sys.modules[module_name]
    module_name_split = module_name.split('.')
    module_name = module_name_split[-1] if len(module_name_split) else ''

    for deprecated, current in deprecation_map.items():
        if hasattr(mod, '__all__'):
            mod.__all__.append(deprecated)
        current_fn = None
        current_tag = ''
        if current:
            current_name, current_tag = split_model_name_tag(current)
            current_fn = getattr(mod, current_name)
        deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)
        setattr(mod, deprecated, deprecated_entrypoint_fn)
        _model_entrypoints[deprecated] = deprecated_entrypoint_fn
        _model_to_module[deprecated] = module_name
        _module_to_models[module_name].add(deprecated)
        _deprecated_models[deprecated] = current
        _module_to_deprecated_models[module_name][deprecated] = current


def _natural_key(string_: str) -> List[Union[int, str]]:
    """See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]


def _expand_filter(filter: str):
    """ expand a 'base_filter' to 'base_filter.*' if no tag portion"""
    filter_base, filter_tag = split_model_name_tag(filter)
    if not filter_tag:
        return ['.'.join([filter_base, '*']), filter]
    else:
        return [filter]


def list_models(
        filter: Union[str, List[str]] = '',
        module: Union[str, List[str]] = '',
        pretrained: bool = False,
        exclude_filters: Union[str, List[str]] = '',
        name_matches_cfg: bool = False,
        include_tags: Optional[bool] = None,
) -> List[str]:
    """ Return list of available model names, sorted alphabetically

    Args:
        filter - Wildcard filter string that works with fnmatch
        module - Limit model selection to a specific submodule (ie 'vision_transformer')
        pretrained - Include only models with valid pretrained weights if True
        exclude_filters - Wildcard filters to exclude models after including them with filter
        name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases)
        include_tags - Include pretrained tags in model names (model.tag). If None, defaults
            set to True when pretrained=True else False (default: None)

    Returns:
        models - The sorted list of models

    Example:
        model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet'
        model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module
    """
    if filter:
        include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
    else:
        include_filters = []

    if include_tags is None:
        # FIXME should this be default behaviour? or default to include_tags=True?
        include_tags = pretrained

    if not module:
        all_models: Set[str] = set(_model_entrypoints.keys())
    else:
        if isinstance(module, str):
            all_models: Set[str] = _module_to_models[module]
        else:
            assert isinstance(module, Sequence)
            all_models: Set[str] = set()
            for m in module:
                all_models.update(_module_to_models[m])
    all_models = all_models - _deprecated_models.keys()  # remove deprecated models from listings

    if include_tags:
        # expand model names to include names w/ pretrained tags
        models_with_tags: Set[str] = set()
        for m in all_models:
            models_with_tags.update(_model_with_tags[m])
        all_models = models_with_tags

    # expand include and exclude filters to include a '.*' for proper match if no tags in filter
    # NOTE(review): if exclude_filters is passed as a bare str, this comprehension iterates it
    # per-character; the isinstance guard further below comes too late — pass a list. TODO confirm/fix.
    include_filters = [ef for f in include_filters for ef in _expand_filter(f)]
    exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)]

    if include_filters:
        models: Set[str] = set()
        for f in include_filters:
            include_models = fnmatch.filter(all_models, f)  # include these models
            if len(include_models):
                models = models.union(include_models)
    else:
        models = all_models

    if exclude_filters:
        if not isinstance(exclude_filters, (tuple, list)):
            exclude_filters = [exclude_filters]
        for xf in exclude_filters:
            exclude_models = fnmatch.filter(models, xf)  # exclude these models
            if len(exclude_models):
                models = models.difference(exclude_models)

    if pretrained:
        models = _model_has_pretrained.intersection(models)

    if name_matches_cfg:
        models = set(_model_pretrained_cfgs).intersection(models)

    return sorted(models, key=_natural_key)


def list_pretrained(
        filter: Union[str, List[str]] = '',
        exclude_filters: str = '',
) -> List[str]:
    """Shortcut for :func:`list_models` restricted to pretrained models, tags included."""
    return list_models(
        filter=filter,
        pretrained=True,
        exclude_filters=exclude_filters,
        include_tags=True,
    )


def get_deprecated_models(module: str = '') -> Dict[str, str]:
    """Return a copy of the deprecation map, optionally limited to one module."""
    all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models
    return deepcopy(all_deprecated)


def is_model(model_name: str) -> bool:
    """ Check if a model name exists
    """
    arch_name = get_arch_name(model_name)
    return arch_name in _model_entrypoints


def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]:
    """Fetch a model entrypoint for specified model name
    """
    arch_name = get_arch_name(model_name)
    if module_filter and arch_name not in _module_to_models.get(module_filter, {}):
        raise RuntimeError(f'Model ({model_name} not found in module {module_filter}.')
    return _model_entrypoints[arch_name]


def list_modules() -> List[str]:
    """ Return list of module names that contain models / model entrypoints
    """
    modules = _module_to_models.keys()
    return sorted(modules)


def is_model_in_modules(
        model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]]
) -> bool:
    """Check if a model exists within a subset of modules

    Args:
        model_name - name of model to check
        module_names - names of modules to search in
    """
    arch_name = get_arch_name(model_name)
    assert isinstance(module_names, (tuple, list, set))
    return any(arch_name in _module_to_models[n] for n in module_names)


def is_model_pretrained(model_name: str) -> bool:
    """Return True if the (possibly tagged) model name has pretrained weights registered."""
    return model_name in _model_has_pretrained


def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]:
    """Return a copy of the pretrained cfg for ``model_name`` ('arch' or 'arch.tag').

    Raises RuntimeError for a known arch with an invalid tag; for an unknown arch,
    returns None when ``allow_unregistered`` else raises.
    """
    if model_name in _model_pretrained_cfgs:
        return deepcopy(_model_pretrained_cfgs[model_name])
    arch_name, tag = split_model_name_tag(model_name)
    if arch_name in _model_default_cfgs:
        # if model arch exists, but the tag is wrong, error out
        raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.')
    if allow_unregistered:
        # if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created
        return None
    raise RuntimeError(f'Model architecture ({arch_name}) has no pretrained cfg registered.')


def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]:
    """ Get a specific model default_cfg value by key. None if key doesn't exist.
    """
    cfg = get_pretrained_cfg(model_name, allow_unregistered=False)
    return getattr(cfg, cfg_key, None)


def get_arch_pretrained_cfgs(model_name: str) -> Dict[str, PretrainedCfg]:
    """ Get all pretrained cfgs for a given architecture.
    """
    arch_name, _ = split_model_name_tag(model_name)
    model_names = _model_with_tags[arch_name]
    cfgs = {m: _model_pretrained_cfgs[m] for m in model_names}
    return cfgs
pytorch-image-models/timm/models/_registry.py/0
{ "file_path": "pytorch-image-models/timm/models/_registry.py", "repo_id": "pytorch-image-models", "token_count": 5725 }
265
""" EdgeNeXt

Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications`
 - https://arxiv.org/abs/2206.10589

Original code and weights from https://github.com/mmaaz60/EdgeNeXt

Modifications and additions for timm by / Copyright 2022, Ross Wightman
"""
import math
from functools import partial
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, create_conv2d, \
    NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_module
from ._manipulate import named_apply, checkpoint_seq
from ._registry import register_model, generate_default_cfgs

__all__ = ['EdgeNeXt']  # model_registry will add each entrypoint fn to this


@register_notrace_module  # reason: FX can't symbolically trace torch.arange in forward method
class PositionalEncodingFourier(nn.Module):
    """Fourier (sine/cosine) 2D positional encoding projected to ``dim`` channels."""

    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
        super().__init__()
        # 1x1 conv projecting concatenated (y, x) encodings (2 * hidden_dim) to dim channels
        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
        self.scale = 2 * math.pi
        self.temperature = temperature
        self.hidden_dim = hidden_dim
        self.dim = dim

    def forward(self, shape: Tuple[int, int, int]):
        """Build a positional encoding tensor for a (B, H, W) spatial shape.

        Returns a (B, dim, H, W) tensor on the projection weight's device/dtype.
        """
        device = self.token_projection.weight.device
        dtype = self.token_projection.weight.dtype
        # all-True mask; cumsum over H (dim 1) and W (dim 2) gives 1-based coordinates
        inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool)
        y_embed = inv_mask.cumsum(1, dtype=torch.float32)
        x_embed = inv_mask.cumsum(2, dtype=torch.float32)
        eps = 1e-6
        # normalize coords to [0, 2*pi]
        y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
        x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.hidden_dim, dtype=torch.int64, device=device).to(torch.float32)
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # interleave sin (even) / cos (odd) frequency components
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(),
             pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(),
             pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        pos = self.token_projection(pos.to(dtype))
        return pos


class ConvBlock(nn.Module):
    """ConvNeXt-style block: depthwise conv -> LayerNorm -> MLP, with layer scale + drop path."""

    def __init__(
            self,
            dim,
            dim_out=None,
            kernel_size=7,
            stride=1,
            conv_bias=True,
            expand_ratio=4,
            ls_init_value=1e-6,
            norm_layer=partial(nn.LayerNorm, eps=1e-6),
            act_layer=nn.GELU,
            drop_path=0.,
    ):
        super().__init__()
        dim_out = dim_out or dim
        # when shape changes (stride or width), residual is taken after the dw conv instead of the input
        self.shortcut_after_dw = stride > 1 or dim != dim_out

        self.conv_dw = create_conv2d(
            dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias)
        self.norm = norm_layer(dim_out)
        self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer)
        # per-channel layer scale (disabled when ls_init_value <= 0)
        self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        shortcut = x
        x = self.conv_dw(x)
        if self.shortcut_after_dw:
            shortcut = x

        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.mlp(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = shortcut + self.drop_path(x)
        return x


class CrossCovarianceAttn(nn.Module):
    """XCA: attention over the channel dimension (C x C map) with learned per-head temperature."""

    def __init__(
            self,
            dim,
            num_heads=8,
            qkv_bias=False,
            attn_drop=0.,
            proj_drop=0.
    ):
        super().__init__()
        self.num_heads = num_heads
        # learned per-head scaling of the normalized q @ k^T similarity
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1)
        q, k, v = qkv.unbind(0)

        # NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map
        attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v)
        x = x.permute(0, 3, 1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    @torch.jit.ignore
    def no_weight_decay(self):
        # exclude learned temperature from weight decay
        return {'temperature'}


class SplitTransposeBlock(nn.Module):
    def __init__(
            self,
            dim,
            num_scales=1,
            num_heads=8,
            expand_ratio=4,
            use_pos_emb=True,
            conv_bias=True,
            qkv_bias=True,
            ls_init_value=1e-6,
            norm_layer=partial(nn.LayerNorm, eps=1e-6),
            act_layer=nn.GELU,
            drop_path=0.,
            attn_drop=0.,
            proj_drop=0.
): super().__init__() width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) self.width = width self.num_scales = max(1, num_scales - 1) convs = [] for i in range(self.num_scales): convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) self.convs = nn.ModuleList(convs) self.pos_embd = None if use_pos_emb: self.pos_embd = PositionalEncodingFourier(dim=dim) self.norm_xca = norm_layer(dim) self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.xca = CrossCovarianceAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.norm = norm_layer(dim, eps=1e-6) self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x # scales code re-written for torchscript as per my res2net fixes -rw # NOTE torch.split(x, self.width, 1) causing issues with ONNX export spx = x.chunk(len(self.convs) + 1, dim=1) spo = [] sp = spx[0] for i, conv in enumerate(self.convs): if i > 0: sp = sp + spx[i] sp = conv(sp) spo.append(sp) spo.append(spx[-1]) x = torch.cat(spo, 1) # XCA B, C, H, W = x.shape x = x.reshape(B, C, H * W).permute(0, 2, 1) if self.pos_embd is not None: pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) x = x.reshape(B, H, W, C) # Inverted Bottleneck x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class EdgeNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, stride=2, depth=2, num_global_blocks=1, num_heads=4, scales=2, kernel_size=7, expand_ratio=4, use_pos_emb=False, 
downsample_block=False, conv_bias=True, ls_init_value=1.0, drop_path_rates=None, norm_layer=LayerNorm2d, norm_layer_cl=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU ): super().__init__() self.grad_checkpointing = False if downsample_block or stride == 1: self.downsample = nn.Identity() else: self.downsample = nn.Sequential( norm_layer(in_chs), nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias) ) in_chs = out_chs stage_blocks = [] for i in range(depth): if i < depth - num_global_blocks: stage_blocks.append( ConvBlock( dim=in_chs, dim_out=out_chs, stride=stride if downsample_block and i == 0 else 1, conv_bias=conv_bias, kernel_size=kernel_size, expand_ratio=expand_ratio, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) else: stage_blocks.append( SplitTransposeBlock( dim=in_chs, num_scales=scales, num_heads=num_heads, expand_ratio=expand_ratio, use_pos_emb=use_pos_emb, conv_bias=conv_bias, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EdgeNeXt(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', dims=(24, 48, 88, 168), depths=(3, 3, 9, 3), global_block_counts=(0, 1, 1, 1), kernel_sizes=(3, 5, 7, 9), heads=(8, 8, 8, 8), d2_scales=(2, 2, 3, 4), use_pos_emb=(False, True, False, False), ls_init_value=1e-6, head_init_scale=1., expand_ratio=4, downsample_block=False, conv_bias=True, stem_type='patch', head_norm_first=False, act_layer=nn.GELU, drop_path_rate=0., drop_rate=0., ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.drop_rate = drop_rate norm_layer = partial(LayerNorm2d, eps=1e-6) norm_layer_cl = partial(nn.LayerNorm, 
eps=1e-6) self.feature_info = [] assert stem_type in ('patch', 'overlap') if stem_type == 'patch': self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias), norm_layer(dims[0]), ) else: self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), norm_layer(dims[0]), ) curr_stride = 4 stages = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] in_chs = dims[0] for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 # FIXME support dilation / output_stride curr_stride *= stride stages.append(EdgeNeXtStage( in_chs=in_chs, out_chs=dims[i], stride=stride, depth=depths[i], num_global_blocks=global_block_counts[i], num_heads=heads[i], drop_path_rates=dp_rates[i], scales=d2_scales[i], expand_ratio=expand_ratio, kernel_size=kernel_sizes[i], use_pos_emb=use_pos_emb[i], ls_init_value=ls_init_value, downsample_block=downsample_block, conv_bias=conv_bias, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, act_layer=act_layer, )) # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 in_chs = dims[i] self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = dims[-1] if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', 
None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) last_idx = len(self.stages) - 1 if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: if norm and feat_idx == last_idx: x_inter = self.norm_pre(x) # applying final norm to last intermediate else: x_inter = x intermediates.append(x_inter) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm_pre(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm_pre = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_tf_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint # models were released as train checkpoints... 
:/ if 'model_ema' in state_dict: state_dict = state_dict['model_ema'] elif 'model' in state_dict: state_dict = state_dict['model'] elif 'state_dict' in state_dict: state_dict = state_dict['state_dict'] out_dict = {} import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_edgenext(variant, pretrained=False, **kwargs): model = build_model_with_cfg( EdgeNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'edgenext_xx_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_x_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_small.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.in21k_ft_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_small_rw.sw_in1k': _cfg( hf_hub_id='timm/', 
test_input_size=(3, 320, 320), test_crop_pct=1.0, ), }) @register_model def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt: # 1.33M & 260.58M @ 256 resolution # 71.23% Top-1 accuracy # No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=51.66 versus 47.67 for MobileViT_XXS # For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt: # 2.34M & 538.0M @ 256 resolution # 75.00% Top-1 accuracy # No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=31.61 versus 28.49 for MobileViT_XS # For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4)) return _create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt: # 5.59M & 1260.59M @ 256 resolution # 79.43% Top-1 accuracy # AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=20.47 versus 18.86 for MobileViT_S # For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 1785.92 for MobileViT_S model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304)) return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt: # 18.51M & 3840.93M @ 256 resolution # 82.5% (normal) 83.7% (USI) Top-1 accuracy # AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=xx.xx versus 
xx.xx for MobileViT_S # For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584]) return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt: model_args = dict( depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), downsample_block=True, conv_bias=False, stem_type='overlap') return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/edgenext.py/0
{ "file_path": "pytorch-image-models/timm/models/edgenext.py", "repo_id": "pytorch-image-models", "token_count": 12222 }
266
"""Deprecated shim for ``timm.models.helpers``.

Re-exports the split-out implementation modules and emits a FutureWarning
directing users to import via ``timm.models`` instead.
"""
import warnings

from ._builder import *
from ._helpers import *
from ._manipulate import *
from ._prune import *

warnings.warn(
    f"Importing from {__name__} is deprecated, please import via timm.models",
    FutureWarning,
    stacklevel=2,  # attribute the warning to the importing module, not this shim
)
pytorch-image-models/timm/models/helpers.py/0
{ "file_path": "pytorch-image-models/timm/models/helpers.py", "repo_id": "pytorch-image-models", "token_count": 62 }
267
""" MobileNet V3 A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 Hacked together by / Copyright 2019, Ross Wightman """ from functools import partial from typing import Any, Dict, Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks, feature_take_indices from ._manipulate import checkpoint_seq, checkpoint from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MobileNetV3', 'MobileNetV3Features'] class MobileNetV3(nn.Module): """MobileNetV3. Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific 'efficient head', where global pooling is done before the head convolution without a final batch-norm layer before the classifier. 
Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include: * HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class) * FBNet-V3 - https://arxiv.org/abs/2006.02049 * LCNet - https://arxiv.org/abs/2109.15099 * MobileNet-V4 - https://arxiv.org/abs/2404.10518 """ def __init__( self, block_args: BlockArgs, num_classes: int = 1000, in_chans: int = 3, stem_size: int = 16, fix_stem: bool = False, num_features: int = 1280, head_bias: bool = True, head_norm: bool = False, pad_type: str = '', act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, se_from_exp: bool = True, round_chs_fn: Callable = round_channels, drop_rate: float = 0., drop_path_rate: float = 0., layer_scale_init_value: Optional[float] = None, global_pool: str = 'avg', ): """Initialize MobileNetV3. Args: block_args: Arguments for blocks of the network. num_classes: Number of classes for classification head. in_chans: Number of input image channels. stem_size: Number of output channels of the initial stem convolution. fix_stem: If True, don't scale stem by round_chs_fn. num_features: Number of output channels of the conv head layer. head_bias: If True, add a learnable bias to the conv head layer. head_norm: If True, add normalization to the head layer. pad_type: Type of padding to use for convolution layers. act_layer: Type of activation layer. norm_layer: Type of normalization layer. aa_layer: Type of anti-aliasing layer. se_layer: Type of Squeeze-and-Excite layer. se_from_exp: If True, calculate SE channel reduction from expanded mid channels. round_chs_fn: Callable to round number of filters based on depth multiplier. drop_rate: Dropout rate. drop_path_rate: Stochastic depth rate. layer_scale_init_value: Enable layer scale on compatible blocks if not None. 
global_pool: Type of pooling to use for global pooling features of the FC head. """ super(MobileNetV3, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, layer_scale_init_value=layer_scale_init_value, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features self.stage_ends = [f['stage'] for f in self.feature_info] self.num_features = builder.in_chs # features of last stage, output of forward_features() self.head_hidden_size = num_features # features of conv_head, pre_logits output # Head + Pooling self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) num_pooled_chs = self.num_features * self.global_pool.feat_mult() if head_norm: # mobilenet-v4 post-pooling PW conv is followed by a norm+act layer self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type) # never bias self.norm_head = norm_act_layer(self.head_hidden_size) self.act2 = nn.Identity() else: # mobilenet-v3 and others only have an activation after final PW conv self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type, bias=head_bias) self.norm_head = nn.Identity() self.act2 = act_layer(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = 
Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() efficientnet_init_weights(self) def as_sequential(self) -> nn.Sequential: """Convert model to sequential form. Returns: Sequential module containing all layers. """ layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.global_pool, self.conv_head, self.norm_head, self.act2]) layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: """Group parameters for optimization.""" return dict( stem=r'^conv_stem|bn1', blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)' ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing.""" self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: """Get the classifier head.""" return self.classifier def reset_classifier(self, num_classes: int, global_pool: str = 'avg') -> None: """Reset the classifier head. Args: num_classes: Number of classes for new classifier. global_pool: Global pooling type. """ self.num_classes = num_classes # NOTE: cannot meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, extra_blocks: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. 
Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' if stop_early: assert intermediates_only, 'Must use intermediates_only for early stopping.' intermediates = [] if extra_blocks: take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) else: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] # forward pass feat_idx = 0 # stem is index 0 x = self.conv_stem(x) x = self.bn1(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index] for feat_idx, blk in enumerate(blocks, start=1): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(blk, x) else: x = blk(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, extra_blocks: bool = False, ) -> List[int]: """Prune layers not required for specified intermediates. Args: indices: Indices of intermediate layers to keep. prune_norm: Whether to prune normalization layer. prune_head: Whether to prune the classifier head. extra_blocks: Include outputs of all blocks. Returns: List of indices that were kept. 
""" if extra_blocks: take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) else: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0 if max_index < len(self.blocks): self.conv_head = nn.Identity() self.norm_head = nn.Identity() if prune_head: self.conv_head = nn.Identity() self.norm_head = nn.Identity() self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers. Args: x: Input tensor. Returns: Feature tensor. """ x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Input features. pre_logits: Return features before final linear layer. Returns: Classification logits or features. """ x = self.global_pool(x) x = self.conv_head(x) x = self.norm_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) if pre_logits: return x return self.classifier(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output logits. """ x = self.forward_features(x) x = self.forward_head(x) return x class MobileNetV3Features(nn.Module): """MobileNetV3 Feature Extractor. A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation and object detection models. """ def __init__( self, block_args: BlockArgs, out_indices: Tuple[int, ...] 
= (0, 1, 2, 3, 4), feature_location: str = 'bottleneck', in_chans: int = 3, stem_size: int = 16, fix_stem: bool = False, output_stride: int = 32, pad_type: PadType = '', round_chs_fn: Callable = round_channels, se_from_exp: bool = True, act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, drop_rate: float = 0., drop_path_rate: float = 0., layer_scale_init_value: Optional[float] = None, ): """Initialize MobileNetV3Features. Args: block_args: Arguments for blocks of the network. out_indices: Output from stages at indices. feature_location: Location of feature before/after each block, must be in ['bottleneck', 'expansion']. in_chans: Number of input image channels. stem_size: Number of output channels of the initial stem convolution. fix_stem: If True, don't scale stem by round_chs_fn. output_stride: Output stride of the network. pad_type: Type of padding to use for convolution layers. round_chs_fn: Callable to round number of filters based on depth multiplier. se_from_exp: If True, calculate SE channel reduction from expanded mid channels. act_layer: Type of activation layer. norm_layer: Type of normalization layer. aa_layer: Type of anti-aliasing layer. se_layer: Type of Squeeze-and-Excite layer. drop_rate: Dropout rate. drop_path_rate: Stochastic depth rate. layer_scale_init_value: Enable layer scale on compatible blocks if not None. 
""" super(MobileNetV3Features, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_layer(stem_size) self.act1 = act_layer(inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, layer_scale_init_value=layer_scale_init_value, feature_location=feature_location, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) # Register feature extraction hooks with FeatureHooks helper self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing.""" self.grad_checkpointing = enable def forward(self, x: torch.Tensor) -> List[torch.Tensor]: """Forward pass through feature extraction. Args: x: Input tensor. Returns: List of feature tensors. 
""" x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) # add stem out for i, b in enumerate(self.blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_mnv3(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV3: """Create a MobileNetV3 model. Args: variant: Model variant name. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: MobileNetV3 model instance. """ features_mode = '' model_cls = MobileNetV3 kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs or 'feature_cls' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'head_norm', 'global_pool') model_cls = MobileNetV3Features features_mode = 'cls' model = build_model_with_cfg( model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs, ) if features_mode == 'cls': model.default_cfg = pretrained_cfg_for_features(model.default_cfg) return model def _gen_mobilenet_v3_rw( variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs ) -> MobileNetV3: """Creates a MobileNet-V3 model. Ref impl: ? Paper: https://arxiv.org/abs/1905.02244 Args: variant: Model variant name. channel_multiplier: Multiplier to number of channels per layer. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: MobileNetV3 model instance. 
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu # stage 1, 112x112 in ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu # stage 3, 28x28 in ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish # stage 6, 7x7 in ['cn_r1_k1_s1_c960'], # hard-swish ] model_kwargs = dict( block_args=decode_arch_def(arch_def), head_bias=False, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v3( variant: str, channel_multiplier: float = 1.0, depth_multiplier: float = 1.0, group_size: Optional[int] = None, pretrained: bool = False, **kwargs ) -> MobileNetV3: """Creates a MobileNet-V3 model. Ref impl: ? Paper: https://arxiv.org/abs/1905.02244 Args: variant: Model variant name. channel_multiplier: Multiplier to number of channels per layer. depth_multiplier: Depth multiplier for model scaling. group_size: Group size for grouped convolutions. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: MobileNetV3 model instance. 
""" if 'small' in variant: num_features = 1024 if 'minimal' in variant: act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s2_e1_c16'], # stage 1, 56x56 in ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], # stage 2, 28x28 in ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], # stage 3, 14x14 in ['ir_r2_k3_s1_e3_c48'], # stage 4, 14x14in ['ir_r3_k3_s2_e6_c96'], # stage 6, 7x7 in ['cn_r1_k1_s1_c576'], ] else: act_layer = resolve_act_layer(kwargs, 'hard_swish') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu # stage 1, 56x56 in ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu # stage 2, 28x28 in ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish # stage 3, 14x14 in ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish # stage 4, 14x14in ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish # stage 6, 7x7 in ['cn_r1_k1_s1_c576'], # hard-swish ] else: num_features = 1280 if 'minimal' in variant: act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16'], # stage 1, 112x112 in ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], # stage 2, 56x56 in ['ir_r3_k3_s2_e3_c40'], # stage 3, 28x28 in ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112'], # stage 5, 14x14in ['ir_r3_k3_s2_e6_c160'], # stage 6, 7x7 in ['cn_r1_k1_s1_c960'], ] else: act_layer = resolve_act_layer(kwargs, 'hard_swish') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_nre'], # relu # stage 1, 112x112 in ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu # stage 3, 28x28 in ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish # stage 6, 7x7 in ['cn_r1_k1_s1_c960'], # hard-swish ] se_layer 
= partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, group_size=group_size), num_features=num_features, stem_size=16, fix_stem=channel_multiplier < 0.75, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_fbnetv3(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs) -> MobileNetV3: """FBNetV3 model generator. Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining` - https://arxiv.org/abs/2006.02049 FIXME untested, this is a preliminary impl of some FBNet-V3 variants. Args: variant: Model variant name. channel_multiplier: Channel width multiplier. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: MobileNetV3 model instance. 
""" vl = variant.split('_')[-1] if vl in ('a', 'b'): stem_size = 16 arch_def = [ ['ds_r2_k3_s1_e1_c16'], ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], ['cn_r1_k1_s1_c1344'], ] elif vl == 'd': stem_size = 24 arch_def = [ ['ds_r2_k3_s1_e1_c16'], ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], ['cn_r1_k1_s1_c1440'], ] elif vl == 'g': stem_size = 32 arch_def = [ ['ds_r3_k3_s1_e1_c24'], ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], ['cn_r1_k1_s1_c1728'], ] else: raise NotImplemented round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) act_layer = resolve_act_layer(kwargs, 'hard_swish') model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=1984, head_bias=False, stem_size=stem_size, round_chs_fn=round_chs_fn, se_from_exp=False, norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_lcnet(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs) -> MobileNetV3: """LCNet model generator. 
    Essentially a MobileNet-V3 crossed with a MobileNet-V1

    Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099

    Args:
        variant: Model variant name.
        channel_multiplier: Multiplier to number of channels per layer.
        pretrained: Load pretrained weights.
        **kwargs: Additional model arguments.

    Returns:
        MobileNetV3 model instance.
    """
    # All stages are depthwise-separable ('dsa') blocks; SE only on the last stage.
    arch_def = [
        # stage 0, 112x112 in
        ['dsa_r1_k3_s1_c32'],
        # stage 1, 112x112 in
        ['dsa_r2_k3_s2_c64'],
        # stage 2, 56x56 in
        ['dsa_r2_k3_s2_c128'],
        # stage 3, 28x28 in
        ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'],
        # stage 4, 14x14in
        ['dsa_r4_k5_s1_c256'],
        # stage 5, 14x14in
        ['dsa_r2_k5_s2_c512_se0.25'],
        # 7x7
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        stem_size=16,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
        se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU),
        num_features=1280,
        **kwargs,
    )
    model = _create_mnv3(variant, pretrained, **model_kwargs)
    return model


def _gen_mobilenet_v4(
        variant: str,
        channel_multiplier: float = 1.0,
        group_size: Optional[int] = None,
        pretrained: bool = False,
        **kwargs,
) -> MobileNetV3:
    """Creates a MobileNet-V4 model.

    Paper: https://arxiv.org/abs/2404.10518

    Args:
        variant: Model variant name.
        channel_multiplier: Multiplier to number of channels per layer.
        group_size: Group size for grouped convolutions.
        pretrained: Load pretrained weights.
        **kwargs: Additional model arguments.

    Returns:
        MobileNetV3 model instance.
""" num_features = 1280 if 'hybrid' in variant: layer_scale_init_value = 1e-5 if 'medium' in variant: stem_size = 32 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in [ 'er_r1_k3_s2_e4_c48' # FusedIB (EdgeResidual) ], # stage 1, 56x56 in [ 'uir_r1_a3_k5_s2_e4_c80', # ExtraDW 'uir_r1_a3_k3_s1_e2_c80', # ExtraDW ], # stage 2, 28x28 in [ 'uir_r1_a3_k5_s2_e6_c160', # ExtraDW 'uir_r1_a0_k0_s1_e2_c160', # FFN 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW 'uir_r1_a3_k5_s1_e4_c160', # ExtraDW 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt ], # stage 3, 14x14in [ 'uir_r1_a5_k5_s2_e6_c256', # ExtraDW 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW 'uir_r2_a3_k5_s1_e4_c256', # ExtraDW 'uir_r1_a0_k0_s1_e2_c256', # FFN 'uir_r1_a3_k5_s1_e2_c256', # ExtraDW 'uir_r1_a0_k0_s1_e2_c256', # FFN 'uir_r1_a0_k0_s1_e4_c256', # FFN 'mqa_r1_k3_h4_s1_d64_c256', # MQA 'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt 'mqa_r1_k3_h4_s1_d64_c256', # MQA 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW 'mqa_r1_k3_h4_s1_d64_c256', # MQA 'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt 'mqa_r1_k3_h4_s1_d64_c256', # MQA 'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt ], # stage 4, 7x7 in [ 'cn_r1_k1_s1_c960' # Conv ], ] elif 'large' in variant: stem_size = 24 act_layer = resolve_act_layer(kwargs, 'gelu') arch_def = [ # stage 0, 112x112 in [ 'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual) ], # stage 1, 56x56 in [ 'uir_r1_a3_k5_s2_e4_c96', # ExtraDW 'uir_r1_a3_k3_s1_e4_c96', # ExtraDW ], # stage 2, 28x28 in [ 'uir_r1_a3_k5_s2_e4_c192', # ExtraDW 'uir_r3_a3_k3_s1_e4_c192', # ExtraDW 'uir_r1_a3_k5_s1_e4_c192', # ExtraDW 'uir_r2_a5_k3_s1_e4_c192', # ExtraDW 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample 
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample 'uir_r1_a5_k3_s1_e4_c192', # ExtraDW 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample 'uir_r1_a5_k3_s1_e4_c192', # ExtraDW 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample 'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt ], # stage 3, 14x14in [ 'uir_r4_a5_k5_s2_e4_c512', # ExtraDW 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW 'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW 'uir_r1_a5_k5_s1_e4_c512', # ExtraDW 'mqa_r1_k3_h8_s1_d64_c512', # MQA 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt 'mqa_r1_k3_h8_s1_d64_c512', # MQA 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt 'mqa_r1_k3_h8_s1_d64_c512', # MQA 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt 'mqa_r1_k3_h8_s1_d64_c512', # MQA 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt ], # stage 4, 7x7 in [ 'cn_r1_k1_s1_c960', # Conv ], ] else: assert False, f'Unknown variant {variant}.' else: layer_scale_init_value = None if 'small' in variant: stem_size = 32 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in [ 'cn_r1_k3_s2_e1_c32', # Conv 'cn_r1_k1_s1_e1_c32', # Conv ], # stage 1, 56x56 in [ 'cn_r1_k3_s2_e1_c96', # Conv 'cn_r1_k1_s1_e1_c64', # Conv ], # stage 2, 28x28 in [ 'uir_r1_a5_k5_s2_e3_c96', # ExtraDW 'uir_r4_a0_k3_s1_e2_c96', # IR 'uir_r1_a3_k0_s1_e4_c96', # ConvNeXt ], # stage 3, 14x14 in [ 'uir_r1_a3_k3_s2_e6_c128', # ExtraDW 'uir_r1_a5_k5_s1_e4_c128', # ExtraDW 'uir_r1_a0_k5_s1_e4_c128', # IR 'uir_r1_a0_k5_s1_e3_c128', # IR 'uir_r2_a0_k3_s1_e4_c128', # IR ], # stage 4, 7x7 in [ 'cn_r1_k1_s1_c960', # Conv ], ] elif 'medium' in variant: stem_size = 32 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in [ 'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual) ], # stage 1, 56x56 in [ 'uir_r1_a3_k5_s2_e4_c80', # ExtraDW 'uir_r1_a3_k3_s1_e2_c80', # ExtraDW ], # stage 2, 28x28 in [ 'uir_r1_a3_k5_s2_e6_c160', # ExtraDW 'uir_r2_a3_k3_s1_e4_c160', # ExtraDW 
'uir_r1_a3_k5_s1_e4_c160', # ExtraDW 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt 'uir_r1_a0_k0_s1_e2_c160', # ExtraDW 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt ], # stage 3, 14x14in [ 'uir_r1_a5_k5_s2_e6_c256', # ExtraDW 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW 'uir_r2_a3_k5_s1_e4_c256', # ExtraDW 'uir_r1_a0_k0_s1_e4_c256', # FFN 'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt 'uir_r1_a3_k5_s1_e2_c256', # ExtraDW 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW 'uir_r2_a0_k0_s1_e4_c256', # FFN 'uir_r1_a5_k0_s1_e2_c256', # ConvNeXt ], # stage 4, 7x7 in [ 'cn_r1_k1_s1_c960', # Conv ], ] elif 'large' in variant: stem_size = 24 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in [ 'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual) ], # stage 1, 56x56 in [ 'uir_r1_a3_k5_s2_e4_c96', # ExtraDW 'uir_r1_a3_k3_s1_e4_c96', # ExtraDW ], # stage 2, 28x28 in [ 'uir_r1_a3_k5_s2_e4_c192', # ExtraDW 'uir_r3_a3_k3_s1_e4_c192', # ExtraDW 'uir_r1_a3_k5_s1_e4_c192', # ExtraDW 'uir_r5_a5_k3_s1_e4_c192', # ExtraDW 'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt ], # stage 3, 14x14in [ 'uir_r4_a5_k5_s2_e4_c512', # ExtraDW 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW 'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW 'uir_r1_a5_k5_s1_e4_c512', # ExtraDW 'uir_r3_a5_k0_s1_e4_c512', # ConvNeXt ], # stage 4, 7x7 in [ 'cn_r1_k1_s1_c960', # Conv ], ] else: assert False, f'Unknown variant {variant}.' 
model_kwargs = dict( block_args=decode_arch_def(arch_def, group_size=group_size), head_bias=False, head_norm=True, num_features=num_features, stem_size=stem_size, fix_stem=channel_multiplier < 1.0, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, layer_scale_init_value=layer_scale_init_value, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _cfg(url: str = '', **kwargs) -> Dict[str, Any]: """Create default configuration dictionary. Args: url: Model weight URL. **kwargs: Additional configuration options. Returns: Configuration dictionary. """ return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'mobilenetv3_large_075.untrained': _cfg(url=''), 'mobilenetv3_large_100.ra_in1k': _cfg( interpolation='bicubic', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth', hf_hub_id='timm/'), 'mobilenetv3_large_100.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0), 'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg( interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', paper_ids='arXiv:2104.10972v4', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth', hf_hub_id='timm/'), 'mobilenetv3_large_100.miil_in21k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth', hf_hub_id='timm/', origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', paper_ids='arXiv:2104.10972v4', interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221), 'mobilenetv3_large_150d.ra4_e3600_r256_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'mobilenetv3_small_050.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_small_075.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_small_100.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_rw.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', hf_hub_id='timm/', interpolation='bicubic'), 'tf_mobilenetv3_large_075.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_large_100.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_large_minimal_100.in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_075.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_100.in1k': _cfg( url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_minimal_100.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'fbnetv3_b.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=0.95), 'fbnetv3_d.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=0.95), 'fbnetv3_g.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)), "lcnet_035.untrained": _cfg(), "lcnet_050.ra2_in1k": _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth', hf_hub_id='timm/', interpolation='bicubic', ), "lcnet_075.ra2_in1k": _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth', 
hf_hub_id='timm/', interpolation='bicubic', ), "lcnet_100.ra2_in1k": _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth', hf_hub_id='timm/', interpolation='bicubic', ), "lcnet_150.untrained": _cfg(), 'mobilenetv4_conv_small_035.untrained': _cfg( mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_small_050.e3000_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_small.e2400_r224_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_small.e1200_r224_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_small.e3600_r256_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_medium.e500_r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_medium.e500_r224_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_medium.e180_r384_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_medium.e180_ad_r384_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 
384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_medium.e250_r384_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_large.e600_r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_large.e500_r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.ix_e550_r256_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.ix_e550_r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.e500_r224_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium.e200_r256_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_large.ix_e600_r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_large.e600_r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, 
test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), # experimental 'mobilenetv4_conv_aa_medium.untrained': _cfg( # hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_conv_blur_medium.e500_r224_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=0.95, test_input_size=(3, 544, 544), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e600_r384_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_conv_aa_large.e230_r384_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), 'mobilenetv4_hybrid_medium_075.untrained': _cfg( # hf_hub_id='timm/', crop_pct=0.95, interpolation='bicubic'), 'mobilenetv4_hybrid_large_075.untrained': _cfg( # hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'), }) @register_model def mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def 
mobilenetv3_large_150d(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_large_150d', 1.5, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_rw(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = 
_gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_b(pretrained: bool = False, **kwargs) -> MobileNetV3: """ FBNetV3-B """ model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_d(pretrained: bool = False, **kwargs) -> MobileNetV3: """ FBNetV3-D """ model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_g(pretrained: bool = False, **kwargs) -> MobileNetV3: """ FBNetV3-G """ model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) return model @register_model def lcnet_035(pretrained: bool = False, **kwargs) -> MobileNetV3: """ PP-LCNet 0.35""" model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def lcnet_050(pretrained: bool = False, **kwargs) -> MobileNetV3: """ PP-LCNet 0.5""" model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def lcnet_075(pretrained: 
bool = False, **kwargs) -> MobileNetV3: """ PP-LCNet 1.0""" model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def lcnet_100(pretrained: bool = False, **kwargs) -> MobileNetV3: """ PP-LCNet 1.0""" model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def lcnet_150(pretrained: bool = False, **kwargs) -> MobileNetV3: """ PP-LCNet 1.5""" model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_small_035(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 """ model = _gen_mobilenet_v4('mobilenetv4_conv_small_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 """ model = _gen_mobilenet_v4('mobilenetv4_conv_small_050', 0.50, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_small(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 """ model = _gen_mobilenet_v4('mobilenetv4_conv_small', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 """ model = _gen_mobilenet_v4('mobilenetv4_conv_medium', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_large(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 """ model = _gen_mobilenet_v4('mobilenetv4_conv_large', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_hybrid_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 Hybrid """ model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_hybrid_large(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 Hybrid""" model = 
_gen_mobilenet_v4('mobilenetv4_hybrid_large', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_conv_aa_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 w/ AvgPool AA """ model = _gen_mobilenet_v4('mobilenetv4_conv_aa_medium', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs) return model @register_model def mobilenetv4_conv_blur_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 Conv w/ Blur AA """ model = _gen_mobilenet_v4('mobilenetv4_conv_blur_medium', 1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs) return model @register_model def mobilenetv4_conv_aa_large(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 w/ AvgPool AA """ model = _gen_mobilenet_v4('mobilenetv4_conv_aa_large', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs) return model @register_model def mobilenetv4_hybrid_medium_075(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 Hybrid """ model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv4_hybrid_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3: """ MobileNet V4 Hybrid""" model = _gen_mobilenet_v4('mobilenetv4_hybrid_large_075', 0.75, pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, { 'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k', 'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k', })
pytorch-image-models/timm/models/mobilenetv3.py/0
{ "file_path": "pytorch-image-models/timm/models/mobilenetv3.py", "repo_id": "pytorch-image-models", "token_count": 32542 }
268
""" RepViT Paper: `RepViT: Revisiting Mobile CNN From ViT Perspective` - https://arxiv.org/abs/2307.09283 @misc{wang2023repvit, title={RepViT: Revisiting Mobile CNN From ViT Perspective}, author={Ao Wang and Hui Chen and Zijia Lin and Hengjun Pu and Guiguang Ding}, year={2023}, eprint={2307.09283}, archivePrefix={arXiv}, primaryClass={cs.CV} } Adapted from official impl at https://github.com/jameslahm/RepViT """ from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint, checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['RepVit'] class ConvNorm(nn.Sequential): def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False)) self.add_module('bn', nn.BatchNorm2d(out_dim)) nn.init.constant_(self.bn.weight, bn_weight_init) nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): c, bn = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Conv2d( w.size(1) * self.c.groups, w.size(0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups, device=c.weight.device, ) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class NormLinear(nn.Sequential): def __init__(self, in_dim, out_dim, bias=True, std=0.02): super().__init__() self.add_module('bn', nn.BatchNorm1d(in_dim)) self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias)) trunc_normal_(self.l.weight, std=std) if bias: nn.init.constant_(self.l.bias, 
0) @torch.no_grad() def fuse(self): bn, l = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.l.weight.T else: b = (l.weight @ b[:, None]).view(-1) + self.l.bias m = nn.Linear(w.size(1), w.size(0), device=l.weight.device) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class RepVggDw(nn.Module): def __init__(self, ed, kernel_size, legacy=False): super().__init__() self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed) if legacy: self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed) # Make torchscript happy. self.bn = nn.Identity() else: self.conv1 = nn.Conv2d(ed, ed, 1, 1, 0, groups=ed) self.bn = nn.BatchNorm2d(ed) self.dim = ed self.legacy = legacy def forward(self, x): return self.bn(self.conv(x) + self.conv1(x) + x) @torch.no_grad() def fuse(self): conv = self.conv.fuse() if self.legacy: conv1 = self.conv1.fuse() else: conv1 = self.conv1 conv_w = conv.weight conv_b = conv.bias conv1_w = conv1.weight conv1_b = conv1.bias conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1]) identity = nn.functional.pad( torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1] ) final_conv_w = conv_w + conv1_w + identity final_conv_b = conv_b + conv1_b conv.weight.data.copy_(final_conv_w) conv.bias.data.copy_(final_conv_b) if not self.legacy: bn = self.bn w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = conv.weight * w[:, None, None, None] b = bn.bias + (conv.bias - bn.running_mean) * bn.weight / (bn.running_var + bn.eps) ** 0.5 conv.weight.data.copy_(w) conv.bias.data.copy_(b) return conv class RepVitMlp(nn.Module): def __init__(self, in_dim, hidden_dim, act_layer): super().__init__() self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0) self.act = act_layer() self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0) def forward(self, x): return 
self.conv2(self.act(self.conv1(x))) class RepViTBlock(nn.Module): def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy=False): super(RepViTBlock, self).__init__() self.token_mixer = RepVggDw(in_dim, kernel_size, legacy) self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity() self.channel_mixer = RepVitMlp(in_dim, in_dim * mlp_ratio, act_layer) def forward(self, x): x = self.token_mixer(x) x = self.se(x) identity = x x = self.channel_mixer(x) return identity + x class RepVitStem(nn.Module): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) self.act1 = act_layer() self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) self.stride = 4 def forward(self, x): return self.conv2(self.act1(self.conv1(x))) class RepVitDownsample(nn.Module): def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy=False): super().__init__() self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer, legacy=legacy) self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim) self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1) self.ffn = RepVitMlp(out_dim, out_dim * mlp_ratio, act_layer) def forward(self, x): x = self.pre_block(x) x = self.spatial_downsample(x) x = self.channel_downsample(x) identity = x x = self.ffn(x) return x + identity class RepVitClassifier(nn.Module): def __init__(self, dim, num_classes, distillation=False, drop=0.0): super().__init__() self.head_drop = nn.Dropout(drop) self.head = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() self.distillation = distillation self.distilled_training = False self.num_classes = num_classes if distillation: self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x): x = self.head_drop(x) if self.distillation: x1, x2 = self.head(x), self.head_dist(x) if self.training 
and self.distilled_training and not torch.jit.is_scripting(): return x1, x2 else: return (x1 + x2) / 2 else: x = self.head(x) return x @torch.no_grad() def fuse(self): if not self.num_classes > 0: return nn.Identity() head = self.head.fuse() if self.distillation: head_dist = self.head_dist.fuse() head.weight += head_dist.weight head.bias += head_dist.bias head.weight /= 2 head.bias /= 2 return head else: return head class RepVitStage(nn.Module): def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True, legacy=False): super().__init__() if downsample: self.downsample = RepVitDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy) else: assert in_dim == out_dim self.downsample = nn.Identity() blocks = [] use_se = True for _ in range(depth): blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy)) use_se = not use_se self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class RepVit(nn.Module): def __init__( self, in_chans=3, img_size=224, embed_dim=(48,), depth=(2,), mlp_ratio=2, global_pool='avg', kernel_size=3, num_classes=1000, act_layer=nn.GELU, distillation=True, drop_rate=0.0, legacy=False, ): super(RepVit, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.embed_dim = embed_dim self.num_classes = num_classes in_dim = embed_dim[0] self.stem = RepVitStem(in_chans, in_dim, act_layer) stride = self.stem.stride resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))]) num_stages = len(embed_dim) mlp_ratios = to_ntuple(num_stages)(mlp_ratio) self.feature_info = [] stages = [] for i in range(num_stages): downsample = True if i != 0 else False stages.append( RepVitStage( in_dim, embed_dim[i], depth[i], mlp_ratio=mlp_ratios[i], act_layer=act_layer, kernel_size=kernel_size, downsample=downsample, legacy=legacy, ) ) stage_stride = 2 if downsample else 1 stride *= 
stage_stride resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] in_dim = embed_dim[i] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = embed_dim[-1] self.head_drop = nn.Dropout(drop_rate) self.head = RepVitClassifier(embed_dim[-1], num_classes, distillation) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^stem', blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]) # stem and embed return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, distillation: bool = False): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = RepVitClassifier(self.embed_dim[-1], num_classes, distillation) @torch.jit.ignore def set_distilled_training(self, enable=True): self.head.distilled_training = enable def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(stage, x) else: x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean((2, 3), keepdim=False) x = self.head_drop(x) if pre_logits: return x return self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x @torch.no_grad() def fuse(self): def fuse_children(net): for child_name, child in net.named_children(): if hasattr(child, 'fuse'): fused = child.fuse() setattr(net, child_name, fused) fuse_children(fused) else: fuse_children(child) fuse_children(self) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.c', 'classifier': ('head.head.l', 'head.head_dist.l'), **kwargs, } default_cfgs = generate_default_cfgs( 
{ 'repvit_m1.dist_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m2.dist_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m3.dist_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m0_9.dist_300e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m0_9.dist_450e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m1_0.dist_300e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m1_0.dist_450e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m1_1.dist_300e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m1_1.dist_450e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m1_5.dist_300e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m1_5.dist_450e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m2_3.dist_300e_in1k': _cfg( hf_hub_id='timm/', ), 'repvit_m2_3.dist_450e_in1k': _cfg( hf_hub_id='timm/', ), } ) def _create_repvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( RepVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model @register_model def repvit_m1(pretrained=False, **kwargs): """ Constructs a RepViT-M1 model """ model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2), legacy=True) return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m2(pretrained=False, **kwargs): """ Constructs a RepViT-M2 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2), legacy=True) return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m3(pretrained=False, **kwargs): """ Constructs a RepViT-M3 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2), legacy=True) return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m0_9(pretrained=False, **kwargs): """ Constructs a RepViT-M0.9 model """ model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2)) return 
_create_repvit('repvit_m0_9', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m1_0(pretrained=False, **kwargs): """ Constructs a RepViT-M1.0 model """ model_args = dict(embed_dim=(56, 112, 224, 448), depth=(2, 2, 14, 2)) return _create_repvit('repvit_m1_0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m1_1(pretrained=False, **kwargs): """ Constructs a RepViT-M1.1 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2)) return _create_repvit('repvit_m1_1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m1_5(pretrained=False, **kwargs): """ Constructs a RepViT-M1.5 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 24, 4)) return _create_repvit('repvit_m1_5', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m2_3(pretrained=False, **kwargs): """ Constructs a RepViT-M2.3 model """ model_args = dict(embed_dim=(80, 160, 320, 640), depth=(6, 6, 34, 2)) return _create_repvit('repvit_m2_3', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/repvit.py/0
{ "file_path": "pytorch-image-models/timm/models/repvit.py", "repo_id": "pytorch-image-models", "token_count": 9407 }
269
""" TinyViT Paper: `TinyViT: Fast Pretraining Distillation for Small Vision Transformers` - https://arxiv.org/abs/2207.10666 Adapted from official impl at https://github.com/microsoft/Cream/tree/main/TinyViT """ __all__ = ['TinyVit'] import itertools from functools import partial from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath,\ trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_module from ._manipulate import checkpoint, checkpoint_seq from ._registry import register_model, generate_default_cfgs class ConvNorm(torch.nn.Sequential): def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False) self.bn = nn.BatchNorm2d(out_chs) torch.nn.init.constant_(self.bn.weight, bn_weight_init) torch.nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): c, bn = self.conv, self.bn w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / \ (bn.running_var + bn.eps) ** 0.5 m = torch.nn.Conv2d( w.size(1) * self.conv.groups, w.size(0), w.shape[2:], stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class PatchEmbed(nn.Module): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.stride = 4 self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) self.act = act_layer() self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) def forward(self, x): x = self.conv1(x) x = self.act(x) x = 
self.conv2(x) return x class MBConv(nn.Module): def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path): super().__init__() mid_chs = int(in_chs * expand_ratio) self.conv1 = ConvNorm(in_chs, mid_chs, ks=1) self.act1 = act_layer() self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs) self.act2 = act_layer() self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0) self.act3 = act_layer() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.conv1(x) x = self.act1(x) x = self.conv2(x) x = self.act2(x) x = self.conv3(x) x = self.drop_path(x) x += shortcut x = self.act3(x) return x class PatchMerging(nn.Module): def __init__(self, dim, out_dim, act_layer): super().__init__() self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0) self.act1 = act_layer() self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim) self.act2 = act_layer() self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0) def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.conv2(x) x = self.act2(x) x = self.conv3(x) return x class ConvLayer(nn.Module): def __init__( self, dim, depth, act_layer, drop_path=0., conv_expand_ratio=4., ): super().__init__() self.dim = dim self.depth = depth self.blocks = nn.Sequential(*[ MBConv( dim, dim, conv_expand_ratio, act_layer, drop_path[i] if isinstance(drop_path, list) else drop_path, ) for i in range(depth) ]) def forward(self, x): x = self.blocks(x) return x class NormMlp(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, norm_layer=nn.LayerNorm, act_layer=nn.GELU, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.norm = norm_layer(in_features) self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.drop1 = nn.Dropout(drop) self.fc2 = nn.Linear(hidden_features, out_features) self.drop2 = nn.Dropout(drop) def 
forward(self, x): x = self.norm(x) x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class Attention(torch.nn.Module): fused_attn: torch.jit.Final[bool] attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=(14, 14), ): super().__init__() assert isinstance(resolution, tuple) and len(resolution) == 2 self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.val_dim = int(attn_ratio * key_dim) self.out_dim = self.val_dim * num_heads self.attn_ratio = attn_ratio self.resolution = resolution self.fused_attn = use_fused_attn() self.norm = nn.LayerNorm(dim) self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim)) self.proj = nn.Linear(self.out_dim, dim) points = list(itertools.product(range(resolution[0]), range(resolution[1]))) N = len(points) attention_offsets = {} idxs = [] for p1 in points: for p2 in points: offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): attn_bias = self.get_attention_biases(x.device) B, N, _ = x.shape # Normalization 
x = self.norm(x) qkv = self.qkv(x) # (B, N, num_heads, d) q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) # (B, num_heads, N, d) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 1, 3) v = v.permute(0, 2, 1, 3) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn + attn_bias attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 2).reshape(B, N, self.out_dim) x = self.proj(x) return x class TinyVitBlock(nn.Module): """ TinyViT Block. Args: dim (int): Number of input channels. num_heads (int): Number of attention heads. window_size (int): Window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. drop (float, optional): Dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 local_conv_size (int): the kernel size of the convolution between Attention and MLP. Default: 3 act_layer: the activation function. Default: nn.GELU """ def __init__( self, dim, num_heads, window_size=7, mlp_ratio=4., drop=0., drop_path=0., local_conv_size=3, act_layer=nn.GELU ): super().__init__() self.dim = dim self.num_heads = num_heads assert window_size > 0, 'window_size must be greater than 0' self.window_size = window_size self.mlp_ratio = mlp_ratio assert dim % num_heads == 0, 'dim must be divisible by num_heads' head_dim = dim // num_heads window_resolution = (window_size, window_size) self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = NormMlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() pad = local_conv_size // 2 self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim) def forward(self, x): B, H, W, C = x.shape L = H * W shortcut = x if H == self.window_size and W == self.window_size: x = x.reshape(B, L, C) x = self.attn(x) x = x.view(B, H, W, C) else: pad_b = (self.window_size - H % self.window_size) % self.window_size pad_r = (self.window_size - W % self.window_size) % self.window_size padding = pad_b > 0 or pad_r > 0 if padding: x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) # window partition pH, pW = H + pad_b, W + pad_r nH = pH // self.window_size nW = pW // self.window_size x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape( B * nH * nW, self.window_size * self.window_size, C ) x = self.attn(x) # window reverse x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C) if padding: x = x[:, :H, :W].contiguous() x = shortcut + self.drop_path1(x) x = x.permute(0, 3, 1, 2) x = self.local_conv(x) x = x.reshape(B, C, L).transpose(1, 2) x = x + self.drop_path2(self.mlp(x)) return x.view(B, H, W, C) def extra_repr(self) -> str: return f"dim={self.dim}, num_heads={self.num_heads}, " \ f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}" register_notrace_module(TinyVitBlock) class TinyVitStage(nn.Module): """ A basic TinyViT layer for one stage. Args: dim (int): Number of input channels. out_dim: the output dimension of the layer depth (int): Number of blocks. num_heads (int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. drop (float, optional): Dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None local_conv_size: the kernel size of the depthwise convolution between attention and MLP. 
Default: 3 act_layer: the activation function. Default: nn.GELU """ def __init__( self, dim, out_dim, depth, num_heads, window_size, mlp_ratio=4., drop=0., drop_path=0., downsample=None, local_conv_size=3, act_layer=nn.GELU, ): super().__init__() self.depth = depth self.out_dim = out_dim # patch merging layer if downsample is not None: self.downsample = downsample( dim=dim, out_dim=out_dim, act_layer=act_layer, ) else: self.downsample = nn.Identity() assert dim == out_dim # build blocks self.blocks = nn.Sequential(*[ TinyVitBlock( dim=out_dim, num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, drop=drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, local_conv_size=local_conv_size, act_layer=act_layer, ) for i in range(depth)]) def forward(self, x): x = self.downsample(x) x = x.permute(0, 2, 3, 1) # BCHW -> BHWC x = self.blocks(x) x = x.permute(0, 3, 1, 2) # BHWC -> BCHW return x def extra_repr(self) -> str: return f"dim={self.out_dim}, depth={self.depth}" class TinyVit(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', embed_dims=(96, 192, 384, 768), depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_sizes=(7, 7, 14, 7), mlp_ratio=4., drop_rate=0., drop_path_rate=0.1, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, act_layer=nn.GELU, ): super().__init__() self.num_classes = num_classes self.depths = depths self.num_stages = len(depths) self.mlp_ratio = mlp_ratio self.grad_checkpointing = use_checkpoint self.patch_embed = PatchEmbed( in_chs=in_chans, out_chs=embed_dims[0], act_layer=act_layer, ) # stochastic depth rate rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # build stages self.stages = nn.Sequential() stride = self.patch_embed.stride prev_dim = embed_dims[0] self.feature_info = [] for stage_idx in range(self.num_stages): if stage_idx == 0: stage = ConvLayer( dim=prev_dim, depth=depths[stage_idx], act_layer=act_layer, 
drop_path=dpr[:depths[stage_idx]], conv_expand_ratio=mbconv_expand_ratio, ) else: out_dim = embed_dims[stage_idx] drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])] stage = TinyVitStage( dim=embed_dims[stage_idx - 1], out_dim=out_dim, depth=depths[stage_idx], num_heads=num_heads[stage_idx], window_size=window_sizes[stage_idx], mlp_ratio=self.mlp_ratio, drop=drop_rate, local_conv_size=local_conv_size, drop_path=drop_path_rate, downsample=PatchMerging, act_layer=act_layer, ) prev_dim = out_dim stride *= 2 self.stages.append(stage) self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')] # Classifier head self.num_features = self.head_hidden_size = embed_dims[-1] norm_layer_cf = partial(LayerNorm2d, eps=1e-5) self.head = NormMlpClassifierHead( self.num_features, num_classes, pool_type=global_pool, norm_layer=norm_layer_cf, ) # init weights self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay_keywords(self): return {'attention_biases'} @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 'attention_biases' in x} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = 
False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.patch_embed(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(stage, x) else: x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict.keys(): state_dict = state_dict['model'] target_sd = model.state_dict() out_dict = {} for k, v in state_dict.items(): if k.endswith('attention_bias_idxs'): continue if 'attention_biases' in k: # TODO: whether move this func into model for dynamic input resolution? (high risk) v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv1.conv', 'classifier': 'head.fc', 'pool_size': (7, 7), 'input_size': (3, 224, 224), 'crop_pct': 0.95, **kwargs, } default_cfgs = generate_default_cfgs({ 'tiny_vit_5m_224.dist_in22k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22k_distill.pth', num_classes=21841 ), 'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth' ), 'tiny_vit_5m_224.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth' ), 'tiny_vit_11m_224.dist_in22k': _cfg( hf_hub_id='timm/', # 
url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22k_distill.pth', num_classes=21841 ), 'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth' ), 'tiny_vit_11m_224.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth' ), 'tiny_vit_21m_224.dist_in22k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22k_distill.pth', num_classes=21841 ), 'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth' ), 'tiny_vit_21m_224.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth' ), 'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash', ), }) def _create_tiny_vit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( TinyVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_filter_fn=checkpoint_filter_fn, **kwargs ) return model @register_model def tiny_vit_5m_224(pretrained=False, **kwargs): model_kwargs = dict( embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], 
drop_path_rate=0.0, ) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs) @register_model def tiny_vit_11m_224(pretrained=False, **kwargs): model_kwargs = dict( embed_dims=[64, 128, 256, 448], depths=[2, 2, 6, 2], num_heads=[2, 4, 8, 14], window_sizes=[7, 7, 14, 7], drop_path_rate=0.1, ) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs) @register_model def tiny_vit_21m_224(pretrained=False, **kwargs): model_kwargs = dict( embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[7, 7, 14, 7], drop_path_rate=0.2, ) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs) @register_model def tiny_vit_21m_384(pretrained=False, **kwargs): model_kwargs = dict( embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[12, 12, 24, 12], drop_path_rate=0.1, ) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs) @register_model def tiny_vit_21m_512(pretrained=False, **kwargs): model_kwargs = dict( embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[16, 16, 32, 16], drop_path_rate=0.1, ) model_kwargs.update(kwargs) return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs)
pytorch-image-models/timm/models/tiny_vit.py/0
{ "file_path": "pytorch-image-models/timm/models/tiny_vit.py", "repo_id": "pytorch-image-models", "token_count": 13498 }
270
""" timm.optim public API.

Re-exports every optimizer implemented in this sub-package, mirrors the common
``torch.optim`` optimizers into the ``timm.optim`` namespace, and exposes the
optimizer factory / parameter-grouping helpers.
"""
# timm's own optimizer implementations (one module per optimizer).
from .adabelief import AdaBelief
from .adafactor import Adafactor
from .adafactor_bv import AdafactorBigVision
from .adahessian import Adahessian
from .adamp import AdamP
from .adamw import AdamWLegacy
from .adan import Adan
from .adopt import Adopt
from .lamb import Lamb
from .laprop import LaProp
from .lars import Lars
from .lion import Lion
from .lookahead import Lookahead
from .madgrad import MADGRAD
from .mars import Mars
from .nadam import NAdamLegacy
from .nadamw import NAdamW
from .nvnovograd import NvNovoGrad
from .radam import RAdamLegacy
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from .sgdw import SGDW

# bring common torch.optim Optimizers into timm.optim namespace for consistency
from torch.optim import Adadelta, Adagrad, Adamax, Adam, AdamW, RMSprop, SGD
try:
    # in case any very old torch versions being used
    # (NAdam/RAdam were added to torch.optim in torch 1.10)
    from torch.optim import NAdam, RAdam
except ImportError:
    pass

# Factory helpers: create optimizers by name, inspect the optimizer registry.
from ._optim_factory import list_optimizers, get_optimizer_class, get_optimizer_info, OptimInfo, OptimizerRegistry, \
    create_optimizer_v2, create_optimizer, optimizer_kwargs
# Parameter-group builders for weight-decay exclusion and layer-wise LR decay.
from ._param_groups import param_groups_layer_decay, param_groups_weight_decay, auto_group_layers
pytorch-image-models/timm/optim/__init__.py/0
{ "file_path": "pytorch-image-models/timm/optim/__init__.py", "repo_id": "pytorch-image-models", "token_count": 385 }
271
""" Lion Optimizer Paper: `Symbolic Discovery of Optimization Algorithms` - https://arxiv.org/abs/2302.06675 Original Impl: https://github.com/google/automl/tree/master/lion References for added functionality: Cautious Optimizers: https://arxiv.org/abs/2411.16085 Why Gradients Rapidly Increase Near the End of Training: https://arxiv.org/abs/2506.02285 """ # Copyright 2023 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from typing import List, Optional, Tuple import torch from torch.optim.optimizer import Optimizer from ._types import ParamsT class Lion(Optimizer): r"""Implements Lion algorithm.""" def __init__( self, params: ParamsT, lr: float = 1e-4, betas: Tuple[float, float] = (0.9, 0.99), weight_decay: float = 0.0, caution: bool = False, corrected_weight_decay: bool = False, maximize: bool = False, foreach: Optional[bool] = None, ): """Initialize the hyperparameters. 
Args: params: iterable of parameters to optimize or dicts defining parameter groups lr: learning rate betas: coefficients used for computing running averages of gradient and its square weight_decay: weight decay coefficient caution: apply caution corrected_weight_decay: apply corrected weight decay (lr**2 / max_lr) """ if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict( lr=lr, betas=betas, weight_decay=weight_decay, caution=caution, corrected_weight_decay=corrected_weight_decay, foreach=foreach, maximize=maximize, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('caution', False) group.setdefault('corrected_weight_decay', False) group.setdefault('maximize', False) group.setdefault('foreach', None) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure: A closure that reevaluates the model and returns the loss. Returns: the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Lion does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) lion( params_with_grad, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], caution=group['caution'], maximize=group['maximize'], foreach=group['foreach'], max_lr=self.defaults['lr'] if group['corrected_weight_decay'] else None, ) return loss def lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim maximize: bool = False, foreach: bool = None, *, beta1: float, beta2: float, lr: float, weight_decay: float, caution: bool, max_lr: Optional[float] = None, ): r"""Functional API that performs Lion algorithm computation. 
""" if foreach is None: try: # cannot do foreach if this overload doesn't exist when caution enabled foreach = not caution or 'Scalar' in torch.ops.aten._foreach_maximum_.overloads() except: foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_lion else: func = _single_tensor_lion func( params, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, caution=caution, maximize=maximize, max_lr=max_lr, ) def _single_tensor_lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, caution: bool, maximize: bool, max_lr: Optional[float], ): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] if torch.is_complex(param): grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) param = torch.view_as_real(param) # Perform stepweight decay wd_scale = lr if max_lr is None else lr ** 2 / max_lr param.mul_(1 - wd_scale * weight_decay) # Weight update update = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1).sign_() if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 mask = (update * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) update.mul_(mask) param.add_(update, alpha=-lr) # Decay the momentum running average coefficient exp_avg.lerp_(grad, 1 - beta2) def _multi_tensor_lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, caution: bool, maximize: bool, max_lr: Optional[float], ): if len(params) == 0: return if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if 
torch.is_complex(x) else x for x in exp_avgs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # Perform stepweight decay wd_scale = lr if max_lr is None else lr ** 2 / max_lr torch._foreach_mul_(params, 1 - wd_scale * weight_decay) # Weight update updates = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(updates, grads, alpha=1 - beta1) updates = [u.sign_() for u in updates] if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 masks = torch._foreach_mul(updates, grads) masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)] mask_scale = [m.mean() for m in masks] torch._foreach_maximum_(mask_scale, 1e-3) torch._foreach_div_(masks, mask_scale) torch._foreach_mul_(updates, masks) torch._foreach_add_(params, updates, alpha=-lr) # Decay the momentum running average coefficient torch._foreach_mul_(exp_avgs, beta2) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta2)
pytorch-image-models/timm/optim/lion.py/0
{ "file_path": "pytorch-image-models/timm/optim/lion.py", "repo_id": "pytorch-image-models", "token_count": 4091 }
272
""" Plateau Scheduler Adapts PyTorch plateau scheduler and allows application of noise, warmup. Hacked together by / Copyright 2020 Ross Wightman """ import torch from typing import List from .scheduler import Scheduler class PlateauLRScheduler(Scheduler): """Decay the LR by a factor every time the validation loss plateaus.""" def __init__( self, optimizer, decay_rate=0.1, patience_t=10, verbose=True, threshold=1e-4, cooldown_t=0, warmup_t=0, warmup_lr_init=0, lr_min=0, mode='max', noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize=True, ): super().__init__( optimizer, 'lr', noise_range_t=noise_range_t, noise_type=noise_type, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer, patience=patience_t, factor=decay_rate, verbose=verbose, threshold=threshold, cooldown=cooldown_t, mode=mode, min_lr=lr_min ) self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] self.restore_lr = None def state_dict(self): return { 'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): self.lr_scheduler.best = state_dict['best'] if 'last_epoch' in state_dict: self.lr_scheduler.last_epoch = state_dict['last_epoch'] # override the base class step fn completely def step(self, epoch, metric=None): if epoch <= self.warmup_t: lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] super().update_groups(lrs) else: if self.restore_lr is not None: # restore actual LR from before our last noise perturbation before stepping base for i, param_group in enumerate(self.optimizer.param_groups): param_group['lr'] = self.restore_lr[i] self.restore_lr = None 
self.lr_scheduler.step(metric, epoch) # step the base scheduler if self._is_apply_noise(epoch): self._apply_noise(epoch) def step_update(self, num_updates: int, metric: float = None): return None def _apply_noise(self, epoch): noise = self._calculate_noise(epoch) # apply the noise on top of previous LR, cache the old value so we can restore for normal # stepping of base scheduler restore_lr = [] for i, param_group in enumerate(self.optimizer.param_groups): old_lr = float(param_group['lr']) restore_lr.append(old_lr) new_lr = old_lr + old_lr * noise param_group['lr'] = new_lr self.restore_lr = restore_lr def _get_lr(self, t: int) -> List[float]: assert False, 'should not be called as step is overridden'
pytorch-image-models/timm/scheduler/plateau_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/plateau_lr.py", "repo_id": "pytorch-image-models", "token_count": 1807 }
273
""" Eval metrics and related Hacked together by / Copyright 2020 Ross Wightman """ class AverageMeter: """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" maxk = min(max(topk), output.size()[1]) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.reshape(1, -1).expand_as(pred)) return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
pytorch-image-models/timm/utils/metrics.py/0
{ "file_path": "pytorch-image-models/timm/utils/metrics.py", "repo_id": "pytorch-image-models", "token_count": 374 }
274
repos: - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.2.1 hooks: - id: ruff args: - --fix - id: ruff-format - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: check-merge-conflict - id: check-yaml
smolagents/.pre-commit-config.yaml/0
{ "file_path": "smolagents/.pre-commit-config.yaml", "repo_id": "smolagents", "token_count": 158 }
275
# Agentic RAG

[[open-in-colab]]

## Introduction to Retrieval-Augmented Generation (RAG)

Retrieval-Augmented Generation (RAG) combines the power of large language models with external knowledge retrieval to produce more accurate, factual, and contextually relevant responses. At its core, RAG is about "using an LLM to answer a user query, but basing the answer on information retrieved from a knowledge base."

### Why Use RAG?

RAG offers several significant advantages over using vanilla or fine-tuned LLMs:

1. **Factual Grounding**: Reduces hallucinations by anchoring responses in retrieved facts
2. **Domain Specialization**: Provides domain-specific knowledge without model retraining
3. **Knowledge Recency**: Allows access to information beyond the model's training cutoff
4. **Transparency**: Enables citation of sources for generated content
5. **Control**: Offers fine-grained control over what information the model can access

### Limitations of Traditional RAG

Despite its benefits, traditional RAG approaches face several challenges:

- **Single Retrieval Step**: If the initial retrieval results are poor, the final generation will suffer
- **Query-Document Mismatch**: User queries (often questions) may not match well with documents containing answers (often statements)
- **Limited Reasoning**: Simple RAG pipelines don't allow for multi-step reasoning or query refinement
- **Context Window Constraints**: Retrieved documents must fit within the model's context window

## Agentic RAG: A More Powerful Approach

We can overcome these limitations by implementing an **Agentic RAG** system - essentially an agent equipped with retrieval capabilities. This approach transforms RAG from a rigid pipeline into an interactive, reasoning-driven process.

### Key Benefits of Agentic RAG

An agent with retrieval tools can:

1. ✅ **Formulate optimized queries**: The agent can transform user questions into retrieval-friendly queries
2.
✅ **Perform multiple retrievals**: The agent can retrieve information iteratively as needed
3. ✅ **Reason over retrieved content**: The agent can analyze, synthesize, and draw conclusions from multiple sources
4. ✅ **Self-critique and refine**: The agent can evaluate retrieval results and adjust its approach

This approach naturally implements advanced RAG techniques:

- **Hypothetical Document Embedding (HyDE)**: Instead of using the user query directly, the agent formulates retrieval-optimized queries ([paper reference](https://huggingface.co/papers/2212.10496))
- **Self-Query Refinement**: The agent can analyze initial results and perform follow-up retrievals with refined queries ([technique reference](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/))

## Building an Agentic RAG System

Let's build a complete Agentic RAG system step by step. We'll create an agent that can answer questions about the Hugging Face Transformers library by retrieving information from its documentation.

You can follow along with the code snippets below, or check out the full example in the smolagents GitHub repository: [examples/rag.py](https://github.com/huggingface/smolagents/blob/main/examples/rag.py).
### Step 1: Install Required Dependencies First, we need to install the necessary packages: ```bash pip install smolagents pandas langchain langchain-community sentence-transformers datasets python-dotenv rank_bm25 --upgrade ``` If you plan to use Hugging Face's Inference API, you'll need to set up your API token: ```python # Load environment variables (including HF_TOKEN) from dotenv import load_dotenv load_dotenv() ``` ### Step 2: Prepare the Knowledge Base We'll use a dataset containing Hugging Face documentation and prepare it for retrieval: ```python import datasets from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.retrievers import BM25Retriever # Load the Hugging Face documentation dataset knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") # Filter to include only Transformers documentation knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers")) # Convert dataset entries to Document objects with metadata source_docs = [ Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base ] # Split documents into smaller chunks for better retrieval text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, # Characters per chunk chunk_overlap=50, # Overlap between chunks to maintain context add_start_index=True, strip_whitespace=True, separators=["\n\n", "\n", ".", " ", ""], # Priority order for splitting ) docs_processed = text_splitter.split_documents(source_docs) print(f"Knowledge base prepared with {len(docs_processed)} document chunks") ``` ### Step 3: Create a Retriever Tool Now we'll create a custom tool that our agent can use to retrieve information from the knowledge base: ```python from smolagents import Tool class RetrieverTool(Tool): name = "retriever" description = "Uses semantic search to retrieve the parts of transformers 
documentation that could be most relevant to answer your query." inputs = { "query": { "type": "string", "description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.", } } output_type = "string" def __init__(self, docs, **kwargs): super().__init__(**kwargs) # Initialize the retriever with our processed documents self.retriever = BM25Retriever.from_documents( docs, k=10 # Return top 10 most relevant documents ) def forward(self, query: str) -> str: """Execute the retrieval based on the provided query.""" assert isinstance(query, str), "Your search query must be a string" # Retrieve relevant documents docs = self.retriever.invoke(query) # Format the retrieved documents for readability return "\nRetrieved documents:\n" + "".join( [ f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs) ] ) # Initialize our retriever tool with the processed documents retriever_tool = RetrieverTool(docs_processed) ``` > [!TIP] > We're using BM25, a lexical retrieval method, for simplicity and speed. For production systems, you might want to use semantic search with embeddings for better retrieval quality. Check the [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) for high-quality embedding models. 
### Step 4: Create an Advanced Retrieval Agent Now we'll create an agent that can use our retriever tool to answer questions: ```python from smolagents import InferenceClientModel, CodeAgent # Initialize the agent with our retriever tool agent = CodeAgent( tools=[retriever_tool], # List of tools available to the agent model=InferenceClientModel(), # Default model "Qwen/Qwen2.5-Coder-32B-Instruct" max_steps=4, # Limit the number of reasoning steps verbosity_level=2, # Show detailed agent reasoning ) # To use a specific model, you can specify it like this: # model=InferenceClientModel(model_id="meta-llama/Llama-3.3-70B-Instruct") ``` > [!TIP] > Inference Providers give access to hundreds of models, powered by serverless inference partners. A list of supported providers can be found [here](https://huggingface.co/docs/inference-providers/index). ### Step 5: Run the Agent to Answer Questions Let's use our agent to answer a question about Transformers: ```python # Ask a question that requires retrieving information question = "For a transformers model training, which is slower, the forward or the backward pass?" # Run the agent to get an answer agent_output = agent.run(question) # Display the final answer print("\nFinal answer:") print(agent_output) ``` ## Practical Applications of Agentic RAG Agentic RAG systems can be applied to various use cases: 1. **Technical Documentation Assistance**: Help users navigate complex technical documentation 2. **Research Paper Analysis**: Extract and synthesize information from scientific papers 3. **Legal Document Review**: Find relevant precedents and clauses in legal documents 4. **Customer Support**: Answer questions based on product documentation and knowledge bases 5. **Educational Tutoring**: Provide explanations based on textbooks and learning materials ## Conclusion Agentic RAG represents a significant advancement over traditional RAG pipelines. 
By combining the reasoning capabilities of LLM agents with the factual grounding of retrieval systems, we can build more powerful, flexible, and accurate information systems. The approach we've demonstrated: - Overcomes the limitations of single-step retrieval - Enables more natural interactions with knowledge bases - Provides a framework for continuous improvement through self-critique and query refinement As you build your own Agentic RAG systems, consider experimenting with different retrieval methods, agent architectures, and knowledge sources to find the optimal configuration for your specific use case.
smolagents/docs/source/en/examples/rag.md/0
{ "file_path": "smolagents/docs/source/en/examples/rag.md", "repo_id": "smolagents", "token_count": 2523 }
276
- title: Get started sections: - local: index title: ์†Œ๊ฐœ - title: ์˜ˆ์ œ sections: - local: examples/text_to_sql title: ์Šค์Šค๋กœ ์˜ค๋ฅ˜๋ฅผ ์ˆ˜์ •ํ•˜๋Š” Text-to-SQL - local: in_translation title: (๋ฒˆ์—ญ์ค‘) Master your knowledge base with agentic RAG - local: examples/multiagents title: ๋ฉ€ํ‹ฐ ์—์ด์ „ํŠธ ์‹œ์Šคํ…œ ์˜ค์ผ€์ŠคํŠธ๋ ˆ์ด์…˜ - local: in_translation title: (๋ฒˆ์—ญ์ค‘) Build a web browser agent using vision models - local: in_translation title: (๋ฒˆ์—ญ์ค‘) Using different models - local: in_translation title: (๋ฒˆ์—ญ์ค‘) "Human-in-the-Loop: Customize agent plan interactively" - local: in_translation title: (๋ฒˆ์—ญ์ค‘) Async Applications with Agents - title: Reference sections: - local: reference/agents title: Agent-related objects - local: reference/models title: Model-related objects - title: Tools sections: - title: Tool-related objects local: reference/tools - title: Built-in Tools local: reference/default_tools
smolagents/docs/source/ko/_toctree.yml/0
{ "file_path": "smolagents/docs/source/ko/_toctree.yml", "repo_id": "smolagents", "token_count": 424 }
277
# ๅทฅๅ…ท <Tip warning={true}> Smolagents ๆ˜ฏไธ€ไธชๅฎž้ชŒๆ€ง API๏ผŒๅฏ่ƒฝไผš้šๆ—ถๆ›ดๆ”นใ€‚็”ฑไบŽ API ๆˆ–ๅบ•ๅฑ‚ๆจกๅž‹ๅฏ่ƒฝๅ‘็”Ÿๅ˜ๅŒ–๏ผŒไปฃ็†่ฟ”ๅ›ž็š„็ป“ๆžœๅฏ่ƒฝไผšๆœ‰ๆ‰€ไธๅŒใ€‚ </Tip> ่ฆไบ†่งฃๆ›ดๅคšๅ…ณไบŽๆ™บ่ƒฝไฝ“ๅ’Œๅทฅๅ…ท็š„ไฟกๆฏ๏ผŒ่ฏทๅŠกๅฟ…้˜…่ฏป[ๅ…ฅ้—จๆŒ‡ๅ—](../index)ใ€‚ๆœฌ้กต้ขๅŒ…ๅซๅบ•ๅฑ‚็ฑป็š„ API ๆ–‡ๆกฃใ€‚ ## ๅทฅๅ…ท ### load_tool [[autodoc]] load_tool ### tool [[autodoc]] tool ### Tool [[autodoc]] Tool ### launch_gradio_demo [[autodoc]] launch_gradio_demo ## ้ป˜่ฎคๅทฅๅ…ท ### PythonInterpreterTool [[autodoc]] PythonInterpreterTool ### FinalAnswerTool [[autodoc]] FinalAnswerTool ### UserInputTool [[autodoc]] UserInputTool ### DuckDuckGoSearchTool [[autodoc]] DuckDuckGoSearchTool ### GoogleSearchTool [[autodoc]] GoogleSearchTool ### VisitWebpageTool [[autodoc]] VisitWebpageTool ### SpeechToTextTool [[autodoc]] SpeechToTextTool ## ๅทฅๅ…ท้›†ๅˆ [[autodoc]] ToolCollection ## ๆ™บ่ƒฝไฝ“็ฑปๅž‹ ๆ™บ่ƒฝไฝ“ๅฏไปฅๅค„็†ๅทฅๅ…ทไน‹้—ด็š„ไปปไฝ•็ฑปๅž‹็š„ๅฏน่ฑก๏ผ›ๅทฅๅ…ทๆ˜ฏๅฎŒๅ…จๅคšๆจกๆ€็š„๏ผŒๅฏไปฅๆŽฅๅ—ๅ’Œ่ฟ”ๅ›žๆ–‡ๆœฌใ€ๅ›พๅƒใ€้Ÿณ้ข‘ใ€่ง†้ข‘ไปฅๅŠๅ…ถไป–็ฑปๅž‹็š„ๅฏน่ฑกใ€‚ไธบไบ†ๅขžๅŠ ๅทฅๅ…ทไน‹้—ด็š„ๅ…ผๅฎนๆ€ง๏ผŒไปฅๅŠๆญฃ็กฎๅ‘ˆ็Žฐๅœจ ipython๏ผˆjupyterใ€colabใ€ipython notebooks ็ญ‰๏ผ‰ไธญ็š„่ฟ”ๅ›ž็ป“ๆžœ๏ผŒๆˆ‘ไปฌไธบ่ฟ™ไบ›็ฑปๅž‹ๅฎž็Žฐไบ†ๅŒ…่ฃ…็ฑปใ€‚ ่ขซๅŒ…่ฃ…็š„ๅฏน่ฑกๅบ”่ฏฅ็ปง็ปญไฟๆŒๅ…ถๅˆๅง‹่กŒไธบ๏ผ›ไพ‹ๅฆ‚๏ผŒไธ€ไธชๆ–‡ๆœฌๅฏน่ฑกๅบ”็ปง็ปญ่กจ็Žฐไธบๅญ—็ฌฆไธฒ๏ผŒไธ€ไธชๅ›พๅƒๅฏน่ฑกๅบ”็ปง็ปญ่กจ็Žฐไธบ `PIL.Image`ใ€‚ ่ฟ™ไบ›็ฑปๅž‹ๆœ‰ไธ‰ไธช็‰นๅฎš็š„็”จ้€”๏ผš - ่ฐƒ็”จ `to_raw` ๆ–นๆณ•ๆ—ถ๏ผŒๅบ”่ฟ”ๅ›žๅบ•ๅฑ‚ๅฏน่ฑก - ่ฐƒ็”จ `to_string` ๆ–นๆณ•ๆ—ถ๏ผŒๅบ”ๅฐ†ๅฏน่ฑก่ฝฌๆขไธบๅญ—็ฌฆไธฒ๏ผšๅฏนไบŽ `AgentText` ็ฑปๅž‹๏ผŒๅฏไปฅ็›ดๆŽฅ่ฟ”ๅ›žๅญ—็ฌฆไธฒ๏ผ›ๅฏนไบŽๅ…ถไป–ๅฎžไพ‹๏ผŒๅˆ™่ฟ”ๅ›žๅฏน่ฑกๅบๅˆ—ๅŒ–็‰ˆๆœฌ็š„่ทฏๅพ„ - ๅœจ ipython ๅ†…ๆ ธไธญๆ˜พ็คบๆ—ถ๏ผŒๅบ”ๆญฃ็กฎๆ˜พ็คบๅฏน่ฑก ### AgentText [[autodoc]] smolagents.agent_types.AgentText ### AgentImage [[autodoc]] smolagents.agent_types.AgentImage ### AgentAudio [[autodoc]] smolagents.agent_types.AgentAudio
smolagents/docs/source/zh/reference/tools.md/0
{ "file_path": "smolagents/docs/source/zh/reference/tools.md", "repo_id": "smolagents", "token_count": 1123 }
278
<jupyter_start><jupyter_code>!pip install plotly kaleido datasets nbformat -U -q import os import datasets import pandas as pd from dotenv import load_dotenv from huggingface_hub import login load_dotenv(override=True) login(os.getenv("HF_TOKEN")) pd.set_option("max_colwidth", None) OUTPUT_DIR = "output" eval_ds = datasets.load_dataset("gaia-benchmark/GAIA", "2023_all")["validation"] eval_ds = eval_ds.rename_columns({"Question": "question", "Final answer": "true_answer", "Level": "task"}) eval_df = pd.DataFrame(eval_ds)<jupyter_output><empty_output><jupyter_text>1. Load all results<jupyter_code>import glob results = [] for f in glob.glob(f"{OUTPUT_DIR}/validation/*.jsonl"): df = pd.read_json(f, lines=True) df["agent_name"] = f.split("/")[-1].split(".")[0] results.append(df) result_df = pd.concat(results) result_df["prediction"] = result_df["prediction"].fillna("No prediction") import re from collections import Counter from scripts.gaia_scorer import check_close_call, question_scorer result_df["is_correct"] = result_df.apply(lambda x: question_scorer(x["prediction"], x["true_answer"]), axis=1) result_df["is_near_correct"] = result_df.apply( lambda x: check_close_call(x["prediction"], x["true_answer"], x["is_correct"]), axis=1, ) result_df["count_steps"] = result_df["intermediate_steps"].apply(len) def find_attachment(question): matches = eval_df.loc[eval_df["question"].apply(lambda x: x in question), "file_name"] if len(matches) == 0: return "Not found" file_path = matches.values[0] if isinstance(file_path, str) and len(file_path) > 0: return file_path.split(".")[-1] else: return "None" result_df["attachment_type"] = result_df["question"].apply(find_attachment) def extract_tool_calls(code): regex = r"\b(\w+)\(" function_calls = [el for el in re.findall(regex, code) if el.islower()] function_call_counter = Counter(function_calls) return function_call_counter def sum_tool_calls(steps): total_count = Counter() for step in steps: if "llm_output" in step: total_count += 
extract_tool_calls(step["llm_output"]) return total_count def get_durations(row): # start_datetime = datetime.strptime(row['start_time'], "%Y-%m-%d %H:%M:%S") # end_datetime = datetime.strptime(row['end_time'], "%Y-%m-%d %H:%M:%S") duration_timedelta = row["end_time"] - row["start_time"] return int(duration_timedelta.total_seconds()) result_df["duration"] = result_df.apply(get_durations, axis=1) # result_df["tool_calls"] = result_df["intermediate_steps"].apply(sum_tool_calls) result_df["agent_name"].value_counts()<jupyter_output><empty_output><jupyter_text>2. Inspect specific runs<jupyter_code>sel_df = result_df # sel_df = sel_df.loc[ # (result_df["agent_name"].isin(list_versions)) # ] sel_df = sel_df.reset_index(drop=True) display(sel_df["agent_name"].value_counts()) sel_df = sel_df.drop_duplicates(subset=["agent_name", "question"]) display(sel_df.groupby("agent_name")[["task"]].value_counts()) print("Total length:", len(sel_df), "- is complete:", len(sel_df) == 165) display("Average score:", sel_df.groupby("agent_name")[["is_correct"]].mean().round(3)) display( sel_df.groupby(["agent_name", "task"])[["is_correct", "is_near_correct", "count_steps", "question", "duration"]] .agg( { "is_correct": "mean", "is_near_correct": "mean", "count_steps": "mean", "question": "count", "duration": "mean", } ) .rename(columns={"question": "count"}) ) import plotly.express as px cumulative_df = ( ( sel_df.groupby("agent_name")[["is_correct", "is_near_correct"]] .expanding(min_periods=1, axis=0, method="single") .agg({"is_correct": "mean", "is_near_correct": "count"}) .reset_index() ) .copy() .rename(columns={"is_near_correct": "index"}) ) cumulative_df["index"] = cumulative_df["index"].astype(int) - 1 def find_question(row): try: res = sel_df.loc[sel_df["agent_name"] == row["agent_name"], "question"].iloc[row["index"]][:50] return res except Exception: return "" cumulative_df["question"] = cumulative_df.apply(find_question, axis=1) px.line( cumulative_df, color="agent_name", 
x="index", y="is_correct", hover_data="question", )<jupyter_output><empty_output><jupyter_text>3. Dive deeper into one run<jupyter_code>sel_df = result_df.loc[result_df["agent_name"] == "o1"] print(len(sel_df))<jupyter_output><empty_output><jupyter_text>Count errors<jupyter_code>import numpy as np error_types = [ "AgentParsingError", "AgentExecutionError", "AgentMaxIterationsError", "AgentGenerationError", ] sel_df[error_types] = 0 sel_df["Count steps"] = np.nan def count_errors(row): if isinstance(row["intermediate_steps"], list): row["Count steps"] = len(row["intermediate_steps"]) for step in row["intermediate_steps"]: if isinstance(step, dict) and "error" in step: try: row[str(step["error"]["error_type"])] += 1 except Exception: pass return row sel_df = sel_df.apply(count_errors, axis=1) import plotly.express as px aggregate_errors = ( sel_df.groupby(["is_correct"])[error_types + ["Count steps"]].mean().reset_index().melt(id_vars=["is_correct"]) ) fig = px.bar( aggregate_errors, y="value", x="variable", color="is_correct", labels={ "agent_name": "<b>Model</b>", "task": "<b>Level</b>", "aggregate_score": "<b>Performance</b>", "value": "<b>Average count</b>", "eval_score_GPT4": "<b>Score</b>", }, ) fig.update_layout( height=500, width=800, barmode="group", bargroupgap=0.0, ) fig.update_traces(textposition="outside") fig.write_image("aggregate_errors.png", scale=3) fig.show()<jupyter_output><empty_output><jupyter_text>Inspect result by file extension type<jupyter_code>display( result_df.groupby(["attachment_type"])[["is_correct", "count_steps", "question"]].agg( {"is_correct": "mean", "count_steps": "mean", "question": "count"} ) )<jupyter_output><empty_output><jupyter_text>4. 
Ensembling methods<jupyter_code>counts = result_df["agent_name"].value_counts() long_series = result_df.loc[result_df["agent_name"].isin(counts[counts > 140].index)] def majority_vote(df): df = df[(df["prediction"] != "Unable to determine") & (~df["prediction"].isna()) & (df["prediction"] != "None")] answer_modes = df.groupby("question")["prediction"].agg(lambda x: x.mode()[0]).reset_index() first_occurrences = ( df.groupby(["question", "prediction"]).agg({"task": "first", "is_correct": "first"}).reset_index() ) result = answer_modes.merge(first_occurrences, on=["question", "prediction"], how="left") return result def oracle(df): def get_first_correct_or_first_wrong(group): correct_answers = group[group["is_correct"]] if len(correct_answers) > 0: return correct_answers.iloc[0] return group.iloc[0] result = df.groupby("question").apply(get_first_correct_or_first_wrong) return result.reset_index(drop=True) display((long_series.groupby("agent_name")["is_correct"].mean() * 100).round(2)) print(f"Majority score: {majority_vote(long_series)['is_correct'].mean() * 100:.2f}") print(f"Oracle score: {oracle(long_series)['is_correct'].mean() * 100:.2f}")<jupyter_output><empty_output><jupyter_text>Submit<jupyter_code>agent_run = "code_o1_04_february_submission5.jsonl" df = pd.read_json(f"output/validation/{agent_run}", lines=True) df = df[["task_id", "prediction", "intermediate_steps"]] df = df.rename(columns={"prediction": "model_answer", "intermediate_steps": "reasoning_trace"}) df.to_json("submission.jsonl", orient="records", lines=True)<jupyter_output><empty_output>
smolagents/examples/open_deep_research/analysis.ipynb/0
{ "file_path": "smolagents/examples/open_deep_research/analysis.ipynb", "repo_id": "smolagents", "token_count": 3321 }
279
# from huggingface_hub import login # login() import datasets from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.retrievers import BM25Retriever knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers")) source_docs = [ Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base ] text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=50, add_start_index=True, strip_whitespace=True, separators=["\n\n", "\n", ".", " ", ""], ) docs_processed = text_splitter.split_documents(source_docs) from smolagents import Tool class RetrieverTool(Tool): name = "retriever" description = "Uses lexical search to retrieve the parts of transformers documentation that could be most relevant to answer your query." inputs = { "query": { "type": "string", "description": "The query to perform. This should be lexically close to your target documents. 
Use the affirmative form rather than a question.", } } output_type = "string" def __init__(self, docs, **kwargs): super().__init__(**kwargs) self.retriever = BM25Retriever.from_documents(docs, k=10) def forward(self, query: str) -> str: assert isinstance(query, str), "Your search query must be a string" docs = self.retriever.invoke( query, ) return "\nRetrieved documents:\n" + "".join( [f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs)] ) from smolagents import CodeAgent, InferenceClientModel retriever_tool = RetrieverTool(docs_processed) agent = CodeAgent( tools=[retriever_tool], model=InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"), max_steps=4, verbosity_level=2, stream_outputs=True, ) agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?") print("Final output:") print(agent_output)
smolagents/examples/rag.py/0
{ "file_path": "smolagents/examples/rag.py", "repo_id": "smolagents", "token_count": 818 }
280
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import shutil from pathlib import Path from typing import Generator from smolagents.agent_types import AgentAudio, AgentImage, AgentText from smolagents.agents import MultiStepAgent, PlanningStep from smolagents.memory import ActionStep, FinalAnswerStep from smolagents.models import ChatMessageStreamDelta, MessageRole, agglomerate_stream_deltas from smolagents.utils import _is_package_available def get_step_footnote_content(step_log: ActionStep | PlanningStep, step_name: str) -> str: """Get a footnote string for a step log with duration and token information""" step_footnote = f"**{step_name}**" if step_log.token_usage is not None: step_footnote += f" | Input tokens: {step_log.token_usage.input_tokens:,} | Output tokens: {step_log.token_usage.output_tokens:,}" step_footnote += f" | Duration: {round(float(step_log.timing.duration), 2)}s" if step_log.timing.duration else "" step_footnote_content = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """ return step_footnote_content def _clean_model_output(model_output: str) -> str: """ Clean up model output by removing trailing tags and extra backticks. Args: model_output (`str`): Raw model output. Returns: `str`: Cleaned model output. 
""" if not model_output: return "" model_output = model_output.strip() # Remove any trailing <end_code> and extra backticks, handling multiple possible formats model_output = re.sub(r"```\s*<end_code>", "```", model_output) # handles ```<end_code> model_output = re.sub(r"<end_code>\s*```", "```", model_output) # handles <end_code>``` model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output) # handles ```\n<end_code> return model_output.strip() def _format_code_content(content: str) -> str: """ Format code content as Python code block if it's not already formatted. Args: content (`str`): Code content to format. Returns: `str`: Code content formatted as a Python code block. """ content = content.strip() # Remove existing code blocks and end_code tags content = re.sub(r"```.*?\n", "", content) content = re.sub(r"\s*<end_code>\s*", "", content) content = content.strip() # Add Python code block formatting if not already present if not content.startswith("```python"): content = f"```python\n{content}\n```" return content def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False) -> Generator: """ Process an [`ActionStep`] and yield appropriate Gradio ChatMessage objects. Args: step_log ([`ActionStep`]): ActionStep to process. skip_model_outputs (`bool`): Whether to skip model outputs. Yields: `gradio.ChatMessage`: Gradio ChatMessages representing the action step. 
""" import gradio as gr # Output the step number step_number = f"Step {step_log.step_number}" if not skip_model_outputs: yield gr.ChatMessage(role=MessageRole.ASSISTANT, content=f"**{step_number}**", metadata={"status": "done"}) # First yield the thought/reasoning from the LLM if not skip_model_outputs and getattr(step_log, "model_output", ""): model_output = _clean_model_output(step_log.model_output) yield gr.ChatMessage(role=MessageRole.ASSISTANT, content=model_output, metadata={"status": "done"}) # For tool calls, create a parent message if getattr(step_log, "tool_calls", []): first_tool_call = step_log.tool_calls[0] used_code = first_tool_call.name == "python_interpreter" # Process arguments based on type args = first_tool_call.arguments if isinstance(args, dict): content = str(args.get("answer", str(args))) else: content = str(args).strip() # Format code content if needed if used_code: content = _format_code_content(content) # Create the tool call message parent_message_tool = gr.ChatMessage( role=MessageRole.ASSISTANT, content=content, metadata={ "title": f"๐Ÿ› ๏ธ Used tool {first_tool_call.name}", "status": "done", }, ) yield parent_message_tool # Display execution logs if they exist if getattr(step_log, "observations", "") and step_log.observations.strip(): log_content = step_log.observations.strip() if log_content: log_content = re.sub(r"^Execution logs:\s*", "", log_content) yield gr.ChatMessage( role=MessageRole.ASSISTANT, content=f"```bash\n{log_content}\n", metadata={"title": "๐Ÿ“ Execution Logs", "status": "done"}, ) # Display any images in observations if getattr(step_log, "observations_images", []): for image in step_log.observations_images: path_image = AgentImage(image).to_string() yield gr.ChatMessage( role=MessageRole.ASSISTANT, content={"path": path_image, "mime_type": f"image/{path_image.split('.')[-1]}"}, metadata={"title": "๐Ÿ–ผ๏ธ Output Image", "status": "done"}, ) # Handle errors if getattr(step_log, "error", None): yield 
gr.ChatMessage( role=MessageRole.ASSISTANT, content=str(step_log.error), metadata={"title": "๐Ÿ’ฅ Error", "status": "done"} ) # Add step footnote and separator yield gr.ChatMessage( role=MessageRole.ASSISTANT, content=get_step_footnote_content(step_log, step_number), metadata={"status": "done"}, ) yield gr.ChatMessage(role=MessageRole.ASSISTANT, content="-----", metadata={"status": "done"}) def _process_planning_step(step_log: PlanningStep, skip_model_outputs: bool = False) -> Generator: """ Process a [`PlanningStep`] and yield appropriate gradio.ChatMessage objects. Args: step_log ([`PlanningStep`]): PlanningStep to process. Yields: `gradio.ChatMessage`: Gradio ChatMessages representing the planning step. """ import gradio as gr if not skip_model_outputs: yield gr.ChatMessage(role=MessageRole.ASSISTANT, content="**Planning step**", metadata={"status": "done"}) yield gr.ChatMessage(role=MessageRole.ASSISTANT, content=step_log.plan, metadata={"status": "done"}) yield gr.ChatMessage( role=MessageRole.ASSISTANT, content=get_step_footnote_content(step_log, "Planning step"), metadata={"status": "done"}, ) yield gr.ChatMessage(role=MessageRole.ASSISTANT, content="-----", metadata={"status": "done"}) def _process_final_answer_step(step_log: FinalAnswerStep) -> Generator: """ Process a [`FinalAnswerStep`] and yield appropriate gradio.ChatMessage objects. Args: step_log ([`FinalAnswerStep`]): FinalAnswerStep to process. Yields: `gradio.ChatMessage`: Gradio ChatMessages representing the final answer. 
""" import gradio as gr final_answer = step_log.output if isinstance(final_answer, AgentText): yield gr.ChatMessage( role=MessageRole.ASSISTANT, content=f"**Final answer:**\n{final_answer.to_string()}\n", metadata={"status": "done"}, ) elif isinstance(final_answer, AgentImage): yield gr.ChatMessage( role=MessageRole.ASSISTANT, content={"path": final_answer.to_string(), "mime_type": "image/png"}, metadata={"status": "done"}, ) elif isinstance(final_answer, AgentAudio): yield gr.ChatMessage( role=MessageRole.ASSISTANT, content={"path": final_answer.to_string(), "mime_type": "audio/wav"}, metadata={"status": "done"}, ) else: yield gr.ChatMessage( role=MessageRole.ASSISTANT, content=f"**Final answer:** {str(final_answer)}", metadata={"status": "done"} ) def pull_messages_from_step(step_log: ActionStep | PlanningStep | FinalAnswerStep, skip_model_outputs: bool = False): """Extract Gradio ChatMessage objects from agent steps with proper nesting. Args: step_log: The step log to display as gr.ChatMessage objects. skip_model_outputs: If True, skip the model outputs when creating the gr.ChatMessage objects: This is used for instance when streaming model outputs have already been displayed. 
""" if not _is_package_available("gradio"): raise ModuleNotFoundError( "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`" ) if isinstance(step_log, ActionStep): yield from _process_action_step(step_log, skip_model_outputs) elif isinstance(step_log, PlanningStep): yield from _process_planning_step(step_log, skip_model_outputs) elif isinstance(step_log, FinalAnswerStep): yield from _process_final_answer_step(step_log) else: raise ValueError(f"Unsupported step type: {type(step_log)}") def stream_to_gradio( agent, task: str, task_images: list | None = None, reset_agent_memory: bool = False, additional_args: dict | None = None, ) -> Generator: """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages.""" if not _is_package_available("gradio"): raise ModuleNotFoundError( "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`" ) accumulated_events: list[ChatMessageStreamDelta] = [] for event in agent.run( task, images=task_images, stream=True, reset=reset_agent_memory, additional_args=additional_args ): if isinstance(event, ActionStep | PlanningStep | FinalAnswerStep): for message in pull_messages_from_step( event, # If we're streaming model outputs, no need to display them twice skip_model_outputs=getattr(agent, "stream_outputs", False), ): yield message accumulated_events = [] elif isinstance(event, ChatMessageStreamDelta): accumulated_events.append(event) text = agglomerate_stream_deltas(accumulated_events).render_as_markdown() yield text class GradioUI: """ Gradio interface for interacting with a [`MultiStepAgent`]. This class provides a web interface to interact with the agent in real-time, allowing users to submit prompts, upload files, and receive responses in a chat-like format. It can reset the agent's memory at the start of each interaction if desired. It supports file uploads, which are saved to a specified folder. 
It uses the [`gradio.Chatbot`] component to display the conversation history. This class requires the `gradio` extra to be installed: `smolagents[gradio]`. Args: agent ([`MultiStepAgent`]): The agent to interact with. file_upload_folder (`str`, *optional*): The folder where uploaded files will be saved. If not provided, file uploads are disabled. reset_agent_memory (`bool`, *optional*, defaults to `False`): Whether to reset the agent's memory at the start of each interaction. If `True`, the agent will not remember previous interactions. Raises: ModuleNotFoundError: If the `gradio` extra is not installed. Example: ```python from smolagents import CodeAgent, GradioUI, InferenceClientModel model = InferenceClientModel(model_id="meta-llama/Meta-Llama-3.1-8B-Instruct") agent = CodeAgent(tools=[], model=model) gradio_ui = GradioUI(agent, file_upload_folder="uploads", reset_agent_memory=True) gradio_ui.launch() ``` """ def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None, reset_agent_memory: bool = False): if not _is_package_available("gradio"): raise ModuleNotFoundError( "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`" ) self.agent = agent self.file_upload_folder = Path(file_upload_folder) if file_upload_folder is not None else None self.reset_agent_memory = reset_agent_memory self.name = getattr(agent, "name") or "Agent interface" self.description = getattr(agent, "description", None) if self.file_upload_folder is not None: if not self.file_upload_folder.exists(): self.file_upload_folder.mkdir(parents=True, exist_ok=True) def interact_with_agent(self, prompt, messages, session_state): import gradio as gr # Get the agent type from the template agent if "agent" not in session_state: session_state["agent"] = self.agent try: messages.append(gr.ChatMessage(role="user", content=prompt, metadata={"status": "done"})) yield messages for msg in stream_to_gradio( session_state["agent"], task=prompt, 
reset_agent_memory=self.reset_agent_memory ): if isinstance(msg, gr.ChatMessage): messages[-1].metadata["status"] = "done" messages.append(msg) elif isinstance(msg, str): # Then it's only a completion delta msg = msg.replace("<", r"\<").replace(">", r"\>") # HTML tags seem to break Gradio Chatbot if messages[-1].metadata["status"] == "pending": messages[-1].content = msg else: messages.append( gr.ChatMessage(role=MessageRole.ASSISTANT, content=msg, metadata={"status": "pending"}) ) yield messages yield messages except Exception as e: yield messages raise gr.Error(f"Error in interaction: {str(e)}") def upload_file(self, file, file_uploads_log, allowed_file_types=None): """ Upload a file and add it to the list of uploaded files in the session state. The file is saved to the `self.file_upload_folder` folder. If the file type is not allowed, it returns a message indicating the disallowed file type. Args: file (`gradio.File`): The uploaded file. file_uploads_log (`list`): A list to log uploaded files. allowed_file_types (`list`, *optional*): List of allowed file extensions. Defaults to [".pdf", ".docx", ".txt"]. 
""" import gradio as gr if file is None: return gr.Textbox(value="No file uploaded", visible=True), file_uploads_log if allowed_file_types is None: allowed_file_types = [".pdf", ".docx", ".txt"] file_ext = os.path.splitext(file.name)[1].lower() if file_ext not in allowed_file_types: return gr.Textbox("File type disallowed", visible=True), file_uploads_log # Sanitize file name original_name = os.path.basename(file.name) sanitized_name = re.sub( r"[^\w\-.]", "_", original_name ) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores # Save the uploaded file to the specified folder file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name)) shutil.copy(file.name, file_path) return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path] def log_user_message(self, text_input, file_uploads_log): import gradio as gr return ( text_input + ( f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}" if len(file_uploads_log) > 0 else "" ), "", gr.Button(interactive=False), ) def launch(self, share: bool = True, **kwargs): """ Launch the Gradio app with the agent interface. Args: share (`bool`, defaults to `True`): Whether to share the app publicly. **kwargs: Additional keyword arguments to pass to the Gradio launch method. """ self.create_app().launch(debug=True, share=share, **kwargs) def create_app(self): import gradio as gr with gr.Blocks(theme="ocean", fill_height=True) as demo: # Add session state to store session-specific data session_state = gr.State({}) stored_messages = gr.State([]) file_uploads_log = gr.State([]) with gr.Sidebar(): gr.Markdown( f"# {self.name.replace('_', ' ').capitalize()}" "\n> This web ui allows you to interact with a `smolagents` agent that can use tools and execute steps to complete tasks." 
+ (f"\n\n**Agent description:**\n{self.description}" if self.description else "") ) with gr.Group(): gr.Markdown("**Your request**", container=True) text_input = gr.Textbox( lines=3, label="Chat Message", container=False, placeholder="Enter your prompt here and press Shift+Enter or press the button", ) submit_btn = gr.Button("Submit", variant="primary") # If an upload folder is provided, enable the upload feature if self.file_upload_folder is not None: upload_file = gr.File(label="Upload a file") upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False) upload_file.change( self.upload_file, [upload_file, file_uploads_log], [upload_status, file_uploads_log], ) gr.HTML( "<br><br><h4><center>Powered by <a target='_blank' href='https://github.com/huggingface/smolagents'><b>smolagents</b></a></center></h4>" ) # Main chat interface chatbot = gr.Chatbot( label="Agent", type="messages", avatar_images=( None, "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png", ), resizeable=True, scale=1, latex_delimiters=[ {"left": r"$$", "right": r"$$", "display": True}, {"left": r"$", "right": r"$", "display": False}, {"left": r"\[", "right": r"\]", "display": True}, {"left": r"\(", "right": r"\)", "display": False}, ], ) # Set up event handlers text_input.submit( self.log_user_message, [text_input, file_uploads_log], [stored_messages, text_input, submit_btn], ).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then( lambda: ( gr.Textbox( interactive=True, placeholder="Enter your prompt here and press Shift+Enter or the button" ), gr.Button(interactive=True), ), None, [text_input, submit_btn], ) submit_btn.click( self.log_user_message, [text_input, file_uploads_log], [stored_messages, text_input, submit_btn], ).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then( lambda: ( gr.Textbox( interactive=True, placeholder="Enter your prompt 
here and press Shift+Enter or the button" ), gr.Button(interactive=True), ), None, [text_input, submit_btn], ) chatbot.clear(self.agent.memory.reset) return demo __all__ = ["stream_to_gradio", "GradioUI"]
smolagents/src/smolagents/gradio_ui.py/0
{ "file_path": "smolagents/src/smolagents/gradio_ui.py", "repo_id": "smolagents", "token_count": 9065 }
281
import io from textwrap import dedent from unittest.mock import MagicMock, patch import docker import PIL.Image import pytest from rich.console import Console from smolagents.default_tools import FinalAnswerTool, WikipediaSearchTool from smolagents.monitoring import AgentLogger, LogLevel from smolagents.remote_executors import DockerExecutor, E2BExecutor, RemotePythonExecutor, WasmExecutor from smolagents.utils import AgentError from .utils.markers import require_run_all class TestRemotePythonExecutor: def test_send_tools_empty_tools(self): executor = RemotePythonExecutor(additional_imports=[], logger=MagicMock()) executor.run_code_raise_errors = MagicMock() executor.send_tools({}) assert executor.run_code_raise_errors.call_count == 1 # No new packages should be installed assert "!pip install" not in executor.run_code_raise_errors.call_args.args[0] def test_send_variables_with_empty_dict_is_noop(self): executor = RemotePythonExecutor(additional_imports=[], logger=MagicMock()) executor.run_code_raise_errors = MagicMock() executor.send_variables({}) assert executor.run_code_raise_errors.call_count == 0 @require_run_all def test_send_tools_with_default_wikipedia_search_tool(self): tool = WikipediaSearchTool() executor = RemotePythonExecutor(additional_imports=[], logger=MagicMock()) executor.run_code_raise_errors = MagicMock() executor.send_tools({"wikipedia_search": tool}) assert executor.run_code_raise_errors.call_count == 2 assert "!pip install wikipedia-api" == executor.run_code_raise_errors.call_args_list[0].args[0] assert "class WikipediaSearchTool(Tool)" in executor.run_code_raise_errors.call_args_list[1].args[0] class TestE2BExecutorUnit: def test_e2b_executor_instantiation(self): logger = MagicMock() with patch("e2b_code_interpreter.Sandbox") as mock_sandbox: mock_sandbox.return_value.commands.run.return_value.error = None mock_sandbox.return_value.run_code.return_value.error = None executor = E2BExecutor( additional_imports=[], logger=logger, 
api_key="dummy-api-key", template="dummy-template-id", timeout=60 ) assert isinstance(executor, E2BExecutor) assert executor.logger == logger assert executor.sandbox == mock_sandbox.return_value assert mock_sandbox.call_count == 1 assert mock_sandbox.call_args.kwargs == { "api_key": "dummy-api-key", "template": "dummy-template-id", "timeout": 60, } def test_cleanup(self): """Test that the cleanup method properly shuts down the sandbox""" logger = MagicMock() with patch("e2b_code_interpreter.Sandbox") as mock_sandbox: # Setup mock mock_sandbox.return_value.kill = MagicMock() # Create executor executor = E2BExecutor(additional_imports=[], logger=logger, api_key="dummy-api-key") # Call cleanup executor.cleanup() # Verify sandbox was killed mock_sandbox.return_value.kill.assert_called_once() assert logger.log.call_count >= 2 # Should log start and completion messages @pytest.fixture def e2b_executor(): executor = E2BExecutor( additional_imports=["pillow", "numpy"], logger=AgentLogger(LogLevel.INFO, Console(force_terminal=False, file=io.StringIO())), ) yield executor executor.cleanup() @require_run_all class TestE2BExecutorIntegration: @pytest.fixture(autouse=True) def set_executor(self, e2b_executor): self.executor = e2b_executor @pytest.mark.parametrize( "code_action, expected_result", [ ( dedent(''' final_answer("""This is a multiline final answer""") '''), "This is\na multiline\nfinal answer", ), ( dedent(""" text = '''Text containing final_answer(5) ''' final_answer(text) """), "Text containing\nfinal_answer(5)\n", ), ( dedent(""" num = 2 if num == 1: final_answer("One") elif num == 2: final_answer("Two") """), "Two", ), ], ) def test_final_answer_patterns(self, code_action, expected_result): self.executor.send_tools({"final_answer": FinalAnswerTool()}) code_output = self.executor(code_action) assert code_output.is_final_answer is True assert code_output.output == expected_result def test_custom_final_answer(self): class CustomFinalAnswerTool(FinalAnswerTool): def 
forward(self, answer: str) -> str: return "CUSTOM" + answer self.executor.send_tools({"final_answer": CustomFinalAnswerTool()}) code_action = dedent(""" final_answer(answer="_answer") """) code_output = self.executor(code_action) assert code_output.is_final_answer is True assert code_output.output == "CUSTOM_answer" def test_custom_final_answer_with_custom_inputs(self): class CustomFinalAnswerToolWithCustomInputs(FinalAnswerTool): inputs = { "answer1": {"type": "string", "description": "First part of the answer."}, "answer2": {"type": "string", "description": "Second part of the answer."}, } def forward(self, answer1: str, answer2: str) -> str: return answer1 + "CUSTOM" + answer2 self.executor.send_tools({"final_answer": CustomFinalAnswerToolWithCustomInputs()}) code_action = dedent(""" final_answer( answer1="answer1_", answer2="_answer2" ) """) code_output = self.executor(code_action) assert code_output.is_final_answer is True assert code_output.output == "answer1_CUSTOM_answer2" class TestDockerExecutorUnit: def test_cleanup(self): """Test that cleanup properly stops and removes the container""" logger = MagicMock() with ( patch("docker.from_env") as mock_docker_client, patch("requests.post") as mock_post, patch("websocket.create_connection"), ): # Setup mocks mock_container = MagicMock() mock_container.status = "running" mock_container.short_id = "test123" mock_docker_client.return_value.containers.run.return_value = mock_container mock_docker_client.return_value.images.get.return_value = MagicMock() mock_post.return_value.status_code = 201 mock_post.return_value.json.return_value = {"id": "test-kernel-id"} # Create executor executor = DockerExecutor(additional_imports=[], logger=logger, build_new_image=False) # Call cleanup executor.cleanup() # Verify container was stopped and removed mock_container.stop.assert_called_once() mock_container.remove.assert_called_once() @pytest.fixture def docker_executor(): executor = DockerExecutor( additional_imports=["pillow", 
"numpy"], logger=AgentLogger(LogLevel.INFO, Console(force_terminal=False, file=io.StringIO())), ) yield executor executor.delete() @require_run_all class TestDockerExecutorIntegration: @pytest.fixture(autouse=True) def set_executor(self, docker_executor): self.executor = docker_executor def test_initialization(self): """Check if DockerExecutor initializes without errors""" assert self.executor.container is not None, "Container should be initialized" def test_state_persistence(self): """Test that variables and imports form one snippet persist in the next""" code_action = "import numpy as np; a = 2" self.executor(code_action) code_action = "print(np.sqrt(a))" code_output = self.executor(code_action) assert "1.41421" in code_output.logs def test_execute_output(self): """Test execution that returns a string""" self.executor.send_tools({"final_answer": FinalAnswerTool()}) code_action = 'final_answer("This is the final answer")' code_output = self.executor(code_action) assert code_output.output == "This is the final answer", "Result should be 'This is the final answer'" def test_execute_multiline_output(self): """Test execution that returns a string""" self.executor.send_tools({"final_answer": FinalAnswerTool()}) code_action = 'result = "This is the final answer"\nfinal_answer(result)' code_output = self.executor(code_action) assert code_output.output == "This is the final answer", "Result should be 'This is the final answer'" def test_execute_image_output(self): """Test execution that returns a base64 image""" self.executor.send_tools({"final_answer": FinalAnswerTool()}) code_action = dedent(""" import base64 from PIL import Image from io import BytesIO image = Image.new("RGB", (10, 10), (255, 0, 0)) final_answer(image) """) code_output = self.executor(code_action) assert isinstance(code_output.output, PIL.Image.Image), "Result should be a PIL Image" def test_syntax_error_handling(self): """Test handling of syntax errors""" code_action = 'print("Missing Parenthesis' # 
Syntax error with pytest.raises(AgentError) as exception_info: self.executor(code_action) assert "SyntaxError" in str(exception_info.value), "Should raise a syntax error" def test_cleanup_on_deletion(self): """Test if Docker container stops and removes on deletion""" container_id = self.executor.container.id self.executor.delete() # Trigger cleanup client = docker.from_env() containers = [c.id for c in client.containers.list(all=True)] assert container_id not in containers, "Container should be removed" @pytest.mark.parametrize( "code_action, expected_result", [ ( dedent(''' final_answer("""This is a multiline final answer""") '''), "This is\na multiline\nfinal answer", ), ( dedent(""" text = '''Text containing final_answer(5) ''' final_answer(text) """), "Text containing\nfinal_answer(5)\n", ), ( dedent(""" num = 2 if num == 1: final_answer("One") elif num == 2: final_answer("Two") """), "Two", ), ], ) def test_final_answer_patterns(self, code_action, expected_result): self.executor.send_tools({"final_answer": FinalAnswerTool()}) code_output = self.executor(code_action) assert code_output.is_final_answer is True assert code_output.output == expected_result def test_custom_final_answer(self): class CustomFinalAnswerTool(FinalAnswerTool): def forward(self, answer: str) -> str: return "CUSTOM" + answer self.executor.send_tools({"final_answer": CustomFinalAnswerTool()}) code_action = dedent(""" final_answer(answer="_answer") """) code_output = self.executor(code_action) assert code_output.is_final_answer is True assert code_output.output == "CUSTOM_answer" def test_custom_final_answer_with_custom_inputs(self): class CustomFinalAnswerToolWithCustomInputs(FinalAnswerTool): inputs = { "answer1": {"type": "string", "description": "First part of the answer."}, "answer2": {"type": "string", "description": "Second part of the answer."}, } def forward(self, answer1: str, answer2: str) -> str: return answer1 + "CUSTOM" + answer2 self.executor.send_tools({"final_answer": 
CustomFinalAnswerToolWithCustomInputs()}) code_action = dedent(""" final_answer( answer1="answer1_", answer2="_answer2" ) """) code_output = self.executor(code_action) assert code_output.is_final_answer is True assert code_output.output == "answer1_CUSTOM_answer2" class TestWasmExecutorUnit: def test_wasm_executor_instantiation(self): logger = MagicMock() # Mock subprocess.run to simulate Deno being installed with ( patch("subprocess.run") as mock_run, patch("subprocess.Popen") as mock_popen, patch("requests.get") as mock_get, patch("time.sleep"), ): # Configure mocks mock_run.return_value.returncode = 0 mock_process = MagicMock() mock_process.poll.return_value = None mock_popen.return_value = mock_process mock_get.return_value.status_code = 200 # Create the executor executor = WasmExecutor(additional_imports=["numpy", "pandas"], logger=logger, timeout=30) # Verify the executor was created correctly assert isinstance(executor, WasmExecutor) assert executor.logger == logger assert executor.timeout == 30 assert "numpy" in executor.installed_packages assert "pandas" in executor.installed_packages # Verify Deno was checked assert mock_run.call_count == 1 assert mock_run.call_args.args[0][0] == "deno" assert mock_run.call_args.args[0][1] == "--version" # Verify server was started assert mock_popen.call_count == 1 assert mock_popen.call_args.args[0][0] == "deno" assert mock_popen.call_args.args[0][1] == "run" # Clean up with patch("shutil.rmtree"): executor.cleanup() @require_run_all class TestWasmExecutorIntegration: """ Integration tests for WasmExecutor. These tests require Deno to be installed on the system. Skip these tests if you don't have Deno installed. 
""" @pytest.fixture(autouse=True) def setup_and_teardown(self): """Setup and teardown for each test.""" try: # Check if Deno is installed import subprocess subprocess.run(["deno", "--version"], capture_output=True, check=True) # Create the executor self.executor = WasmExecutor( additional_imports=["numpy", "pandas"], logger=AgentLogger(LogLevel.INFO, Console(force_terminal=False, file=io.StringIO())), timeout=60, ) yield # Clean up self.executor.cleanup() except (subprocess.SubprocessError, FileNotFoundError): pytest.skip("Deno is not installed, skipping integration tests") def test_basic_execution(self): """Test basic code execution.""" code = "a = 2 + 2; print(f'Result: {a}')" code_output = self.executor(code) assert "Result: 4" in code_output.logs def test_state_persistence(self): """Test that variables persist between executions.""" # Define a variable self.executor("x = 42") # Use the variable in a subsequent execution code_output = self.executor("print(x)") assert "42" in code_output.logs def test_final_answer(self): """Test returning a final answer.""" self.executor.send_tools({"final_answer": FinalAnswerTool()}) code = 'final_answer("This is the final answer")' code_output = self.executor(code) assert code_output.output == "This is the final answer" assert code_output.is_final_answer is True def test_numpy_execution(self): """Test execution with NumPy.""" code = """ import numpy as np arr = np.array([1, 2, 3, 4, 5]) print(f"Mean: {np.mean(arr)}") """ code_output = self.executor(code) assert "Mean: 3.0" in code_output.logs def test_error_handling(self): """Test handling of Python errors.""" code = "1/0" # Division by zero with pytest.raises(AgentError) as excinfo: self.executor(code) assert "ZeroDivisionError" in str(excinfo.value) def test_syntax_error_handling(self): """Test handling of syntax errors.""" code = "print('Missing parenthesis" # Missing closing parenthesis with pytest.raises(AgentError) as excinfo: self.executor(code) assert "SyntaxError" in 
str(excinfo.value)
smolagents/tests/test_remote_executors.py/0
{ "file_path": "smolagents/tests/test_remote_executors.py", "repo_id": "smolagents", "token_count": 7884 }
282
# Rust builder FROM lukemathwalker/cargo-chef:latest-rust-1.85.1 AS chef WORKDIR /usr/src ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse FROM chef AS planner COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ python3.11-dev RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \ rm -f $PROTOC_ZIP COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json ARG GIT_SHA ARG DOCKER_LABEL COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo build --profile release-opt --frozen # Python builder # Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile FROM nvidia/cuda:12.4.1-devel-ubuntu22.04 AS pytorch-install WORKDIR /usr/src/ # NOTE: When updating PyTorch version, beware to remove `pip install nvidia-nccl-cu12==2.22.3` below in the Dockerfile. 
Context: https://github.com/huggingface/text-generation-inference/pull/2099 ARG PYTORCH_VERSION=2.7 ARG PYTHON_VERSION=3.11 # Keep in sync with `server/pyproject.toml # Automatically set by buildx ARG TARGETPLATFORM RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ build-essential \ ca-certificates \ ccache \ curl \ git && \ rm -rf /var/lib/apt/lists/* COPY --from=ghcr.io/astral-sh/uv:0.5.31 /uv /uvx /bin/ ENV PATH="$PATH:/root/.local/bin" RUN uv python install ${PYTHON_VERSION} RUN uv venv --python ${PYTHON_VERSION} && uv pip install torch==${PYTORCH_VERSION} torchvision pip setuptools packaging ENV VIRTUAL_ENV=/usr/src/.venv/ ENV PATH="$PATH:/usr/src/.venv/bin/" # CUDA kernels builder image FROM pytorch-install AS kernel-builder ARG MAX_JOBS=8 ENV TORCH_CUDA_ARCH_LIST="8.0;8.6;9.0+PTX" RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ ninja-build cmake \ && rm -rf /var/lib/apt/lists/* # Build Flash Attention CUDA kernels FROM kernel-builder AS flash-att-builder WORKDIR /usr/src COPY server/Makefile-flash-att Makefile # Build specific version of flash attention RUN . .venv/bin/activate && make build-flash-attention # Build Flash Attention v2 CUDA kernels FROM kernel-builder AS flash-att-v2-builder WORKDIR /usr/src COPY server/Makefile-flash-att-v2 Makefile # Build specific version of flash attention v2 RUN . .venv/bin/activate && make build-flash-attention-v2-cuda # Build Transformers exllama kernels FROM kernel-builder AS exllama-kernels-builder WORKDIR /usr/src COPY server/exllama_kernels/ . RUN . .venv/bin/activate && python setup.py build # Build Transformers exllama kernels FROM kernel-builder AS exllamav2-kernels-builder WORKDIR /usr/src COPY server/Makefile-exllamav2/ Makefile # Build specific version of transformers RUN . 
.venv/bin/activate && make build-exllamav2 # Build Transformers awq kernels FROM kernel-builder AS awq-kernels-builder WORKDIR /usr/src COPY server/Makefile-awq Makefile # Build specific version of transformers RUN . .venv/bin/activate && make build-awq # Build Transformers CUDA kernels FROM kernel-builder AS custom-kernels-builder WORKDIR /usr/src COPY server/custom_kernels/ . # Build specific version of transformers RUN . .venv/bin/activate && python setup.py build # Build mamba kernels FROM kernel-builder AS mamba-builder WORKDIR /usr/src COPY server/Makefile-selective-scan Makefile RUN . .venv/bin/activate && make build-all # Build flashinfer FROM kernel-builder AS flashinfer-builder WORKDIR /usr/src COPY server/Makefile-flashinfer Makefile RUN . .venv/bin/activate && make install-flashinfer # Text Generation Inference base image FROM nvidia/cuda:12.4.0-base-ubuntu22.04 AS base # Text Generation Inference base env ENV HF_HOME=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 WORKDIR /usr/src RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ make \ curl \ git \ && rm -rf /var/lib/apt/lists/* # RUN curl -LsSf https://astral.sh/uv/install.sh | sh # ENV PATH="$PATH:/root/.local/bin" COPY --from=ghcr.io/astral-sh/uv:0.5.31 /uv /uvx /bin/ # Install flash-attention dependencies # RUN pip install einops --no-cache-dir # Copy env with PyTorch installed COPY --from=pytorch-install /usr/src/.venv /usr/src/.venv ENV PYTHON_VERSION=3.11 RUN uv python install ${PYTHON_VERSION} ENV VIRTUAL_ENV=/usr/src/.venv/ ENV PATH="$PATH:/usr/src/.venv/bin/" # Install server COPY proto proto COPY server server COPY server/Makefile server/Makefile ENV HF_KERNELS_CACHE=/kernels RUN cd server && \ uv sync --frozen --extra gen --extra bnb --extra accelerate --extra compressed-tensors --extra quantize --extra peft --extra outlines --extra torch --no-install-project --active && \ make gen-server-raw && \ kernels download 
. RUN cd server && \ uv sync --frozen --extra gen --extra bnb --extra accelerate --extra compressed-tensors --extra quantize --extra peft --extra outlines --extra torch --active --python=${PYTHON_VERSION} && \ uv pip install nvidia-nccl-cu12==2.25.1 && \ pwd && \ text-generation-server --help # Copy build artifacts from flash attention builder COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages # Copy build artifacts from flash attention v2 builder COPY --from=flash-att-v2-builder /usr/src/.venv/lib/python3.11/site-packages/flash_attn_2_cuda.cpython-311-x86_64-linux-gnu.so /usr/src/.venv/lib/python3.11/site-packages # Copy build artifacts from custom kernels builder COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages # Copy build artifacts from exllama kernels builder COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages # Copy build artifacts from exllamav2 kernels builder COPY --from=exllamav2-kernels-builder /usr/src/exllamav2/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages # Copy build artifacts from awq kernels builder COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-311 /usr/src/.venv/lib/python3.11/site-packages # Copy build artifacts from mamba builder COPY --from=mamba-builder /usr/src/mamba/build/lib.linux-x86_64-cpython-311/ /usr/src/.venv/lib/python3.11/site-packages COPY --from=mamba-builder /usr/src/causal-conv1d/build/lib.linux-x86_64-cpython-311/ 
/usr/src/.venv/lib/python3.11/site-packages COPY --from=flashinfer-builder /usr/src/.venv/lib/python3.11/site-packages/flashinfer/ /usr/src/.venv/lib/python3.11/site-packages/flashinfer/ # ENV LD_PRELOAD=/opt/conda/lib/python3.11/site-packages/nvidia/nccl/lib/libnccl.so.2 # Required to find libpython within the rust binaries # This is needed because exl2 tries to load flash-attn # And fails with our builds. ENV EXLLAMA_NO_FLASH_ATTN=1 # Deps before the binaries # The binaries change on every build given we burn the SHA into them # The deps change less often. RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ build-essential \ g++ \ && rm -rf /var/lib/apt/lists/* # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher # AWS Sagemaker compatible image FROM base AS sagemaker COPY sagemaker-entrypoint.sh entrypoint.sh RUN chmod +x entrypoint.sh ENTRYPOINT ["./entrypoint.sh"] # Final image FROM base COPY ./tgi-entrypoint.sh /tgi-entrypoint.sh RUN chmod +x /tgi-entrypoint.sh ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib/" ENTRYPOINT ["/tgi-entrypoint.sh"] # CMD ["--json-output"]
text-generation-inference/Dockerfile/0
{ "file_path": "text-generation-inference/Dockerfile", "repo_id": "text-generation-inference", "token_count": 3446 }
283
use std::fs; fn main() -> Result<(), Box<dyn std::error::Error>> { println!("cargo:rerun-if-changed=../../proto/"); fs::create_dir_all("src/v2/pb").unwrap_or(()); let mut config = prost_build::Config::new(); config.protoc_arg("--experimental_allow_proto3_optional"); tonic_build::configure() .build_client(true) .build_server(false) .out_dir("src/v2/pb") .include_file("mod.rs") .compile_with_config(config, &["../../proto/generate.proto"], &["../../proto"]) .map_err(|e| match e.kind(){ std::io::ErrorKind::NotFound => {panic!("`protoc` not found, install libprotoc")}, std::io::ErrorKind::Other => {panic!("`protoc` version unsupported, upgrade protoc: https://github.com/protocolbuffers/protobuf/releases")}, e => {e} }).unwrap_or_else(|e| panic!("protobuf compilation failed: {e}")); fs::create_dir_all("src/v3/pb").unwrap_or(()); let mut config = prost_build::Config::new(); config.protoc_arg("--experimental_allow_proto3_optional"); tonic_build::configure() .build_client(true) .build_server(false) .out_dir("src/v3/pb") .include_file("mod.rs") .compile_with_config(config, &["../../proto/v3/generate.proto"], &["../../proto"]) .unwrap_or_else(|e| panic!("protobuf compilation failed: {e}")); Ok(()) }
text-generation-inference/backends/client/build.rs/0
{ "file_path": "text-generation-inference/backends/client/build.rs", "repo_id": "text-generation-inference", "token_count": 624 }
284
flash_att_commit := 3a9bfd076f98746c73362328958dbc68d145fbec build-flash-attention: if [ ! -d 'flash-attention' ]; then \ pip install -U packaging ninja --no-cache-dir && \ git clone https://github.com/HazyResearch/flash-attention.git; \ fi cd flash-attention && git fetch && git checkout $(flash_att_commit) && \ MAX_JOBS=8 python setup.py build && cd csrc/layer_norm && python setup.py build && cd ../rotary && python setup.py build install-flash-attention: build-flash-attention cd flash-attention && git checkout $(flash_att_commit) && MAX_JOBS=8 python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
text-generation-inference/backends/gaudi/server/Makefile-flash-att/0
{ "file_path": "text-generation-inference/backends/gaudi/server/Makefile-flash-att", "repo_id": "text-generation-inference", "token_count": 231 }
285
import os import sys import typer from pathlib import Path from loguru import logger from typing import Optional from enum import Enum from huggingface_hub import hf_hub_download from text_generation_server.utils.adapter import parse_lora_adapters app = typer.Typer() class Quantization(str, Enum): gptq = "gptq" awq = "awq" fp8 = "fp8" compressed_tensors = "compressed-tensors" class Dtype(str, Enum): float16 = "float16" bloat16 = "bfloat16" class KVCacheDtype(str, Enum): fp8_e4m3fn = "fp8_e4m3fn" fp8_e5m2 = "fp8_e5m2" @app.command() def serve( model_id: str, revision: Optional[str] = None, sharded: bool = False, quantize: Optional[Quantization] = None, speculate: Optional[int] = None, dtype: Optional[Dtype] = None, kv_cache_dtype: Optional[KVCacheDtype] = None, trust_remote_code: bool = False, uds_path: Path = "/tmp/text-generation-server", logger_level: str = "INFO", json_output: bool = False, otlp_endpoint: Optional[str] = None, otlp_service_name: str = "text-generation-inference.server", max_input_tokens: Optional[int] = None, ): if sharded: # assert ( # os.getenv("RANK", None) is not None # ), "RANK must be set when sharded is True" assert ( os.getenv("WORLD_SIZE", None) is not None ), "WORLD_SIZE must be set when sharded is True" assert ( os.getenv("MASTER_ADDR", None) is not None ), "MASTER_ADDR must be set when sharded is True" assert ( os.getenv("MASTER_PORT", None) is not None ), "MASTER_PORT must be set when sharded is True" # Remove default handler logger.remove() logger.add( sys.stdout, format="{message}", filter="text_generation_server", level=logger_level, serialize=json_output, backtrace=True, diagnose=False, ) # Import here after the logger is added to log potential import exceptions from text_generation_server import server from text_generation_server.tracing import setup_tracing # Setup OpenTelemetry distributed tracing if otlp_endpoint is not None: setup_tracing(otlp_service_name=otlp_service_name, otlp_endpoint=otlp_endpoint) lora_adapters = 
parse_lora_adapters(os.getenv("LORA_ADAPTERS")) # TODO: enable lora with cuda graphs. for now disable cuda graphs if lora is enabled # and warn the user if lora_adapters: logger.warning("LoRA adapters enabled (experimental feature).") if "CUDA_GRAPHS" in os.environ: logger.warning( "LoRA adapters incompatible with CUDA Graphs. Disabling CUDA Graphs." ) global CUDA_GRAPHS CUDA_GRAPHS = None # Downgrade enum into str for easier management later on quantize = None if quantize is None else quantize.value dtype = "bfloat16" if dtype is None else dtype.value kv_cache_dtype = None if kv_cache_dtype is None else kv_cache_dtype.value logger.info(f"quantize={quantize} kv_cache_dtype={kv_cache_dtype}") if dtype is not None and quantize not in { None, "bitsandbytes", "bitsandbytes-nf4", "bitsandbytes-fp4", "gptq", "awq", "fp8", "compressed-tensors", }: raise RuntimeError( "Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model." ) server.serve( model_id, lora_adapters, revision, sharded, quantize, speculate, dtype, kv_cache_dtype, trust_remote_code, uds_path, max_input_tokens, ) @app.command() def download_weights( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors", auto_convert: bool = True, logger_level: str = "INFO", json_output: bool = False, trust_remote_code: bool = False, merge_lora: bool = False, ): # Remove default handler logger.remove() logger.add( sys.stdout, format="{message}", filter="text_generation_server", level=logger_level, serialize=json_output, backtrace=True, diagnose=False, ) # Import here after the logger is added to log potential import exceptions from text_generation_server import utils # Test if files were already download try: utils.weight_files(model_id, revision, extension) logger.info("Files are already present on the host. 
" "Skipping download.") return # Local files not found except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError): pass is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv( "WEIGHTS_CACHE_OVERRIDE", None ) is not None if not is_local_model: # TODO: maybe reverse the default value of merge_lora? # currently by default we don't merge the weights with the base model if merge_lora: try: hf_hub_download( model_id, revision=revision, filename="adapter_config.json" ) utils.download_and_unload_peft( model_id, revision, trust_remote_code=trust_remote_code ) is_local_model = True utils.weight_files(model_id, revision, extension) return except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass else: try: utils.peft.download_peft( model_id, revision, trust_remote_code=trust_remote_code ) except Exception: pass try: import json config = hf_hub_download( model_id, revision=revision, filename="config.json" ) with open(config, "r") as f: config = json.load(f) base_model_id = config.get("base_model_name_or_path", None) if base_model_id and base_model_id != model_id: try: logger.info(f"Downloading parent model {base_model_id}") download_weights( model_id=base_model_id, revision="main", extension=extension, auto_convert=auto_convert, logger_level=logger_level, json_output=json_output, trust_remote_code=trust_remote_code, ) except Exception: pass except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass # Try to download weights from the hub try: filenames = utils.weight_hub_files(model_id, revision, extension) utils.download_weights(filenames, model_id, revision) # Successfully downloaded weights return # No weights found on the hub with this extension except utils.EntryNotFoundError as e: # Check if we want to automatically convert to safetensors or if we can use .bin weights instead if not extension == ".safetensors" or not auto_convert: raise e elif (Path(model_id) / "adapter_config.json").exists(): 
# Try to load as a local PEFT model try: utils.download_and_unload_peft( model_id, revision, trust_remote_code=trust_remote_code ) utils.weight_files(model_id, revision, extension) return except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass elif (Path(model_id) / "config.json").exists(): # Try to load as a local Medusa model try: import json config = Path(model_id) / "config.json" with open(config, "r") as f: config = json.load(f) base_model_id = config.get("base_model_name_or_path", None) if base_model_id: try: logger.info(f"Downloading parent model {base_model_id}") download_weights( model_id=base_model_id, revision="main", extension=extension, auto_convert=auto_convert, logger_level=logger_level, json_output=json_output, trust_remote_code=trust_remote_code, ) except Exception: pass except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): pass # Try to see if there are local pytorch weights try: # Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE try: local_pt_files = utils.weight_files(model_id, revision, ".bin") except Exception: local_pt_files = utils.weight_files(model_id, revision, ".pt") # No local pytorch weights except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError): if extension == ".safetensors": logger.warning( f"No safetensors weights found for model {model_id} at revision {revision}. " f"Downloading PyTorch weights." ) # Try to see if there are pytorch weights on the hub pt_filenames = utils.weight_hub_files(model_id, revision, ".bin") # Download pytorch weights local_pt_files = utils.download_weights(pt_filenames, model_id, revision) if auto_convert: if not trust_remote_code: logger.warning( "๐Ÿšจ๐ŸšจBREAKING CHANGE in 2.0๐Ÿšจ๐Ÿšจ: Safetensors conversion is disabled without `--trust-remote-code` because " "Pickle files are unsafe and can essentially contain remote code execution!" 
"Please check for more information here: https://huggingface.co/docs/text-generation-inference/basic_tutorials/safety", ) logger.warning( f"No safetensors weights found for model {model_id} at revision {revision}. " f"Converting PyTorch weights to safetensors." ) # Safetensors final filenames local_st_files = [ p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors" for p in local_pt_files ] try: import transformers import json if is_local_model: config_filename = os.path.join(model_id, "config.json") else: config_filename = hf_hub_download( model_id, revision=revision, filename="config.json" ) with open(config_filename, "r") as f: config = json.load(f) architecture = config["architectures"][0] class_ = getattr(transformers, architecture) # Name for this varible depends on transformers version. discard_names = getattr(class_, "_tied_weights_keys", []) except Exception: discard_names = [] # Convert pytorch weights to safetensors utils.convert_files(local_pt_files, local_st_files, discard_names) @app.command() def quantize( model_id: str, output_dir: str, revision: Optional[str] = None, logger_level: str = "INFO", json_output: bool = False, trust_remote_code: bool = False, upload_to_model_id: Optional[str] = None, percdamp: float = 0.01, act_order: bool = False, groupsize: int = 128, ): if revision is None: revision = "main" download_weights( model_id=model_id, revision=revision, logger_level=logger_level, json_output=json_output, ) from text_generation_server.layers.gptq.quantize import quantize quantize( model_id=model_id, bits=4, groupsize=groupsize, output_dir=output_dir, revision=revision, trust_remote_code=trust_remote_code, upload_to_model_id=upload_to_model_id, percdamp=percdamp, act_order=act_order, sym=True, ) if __name__ == "__main__": app()
text-generation-inference/backends/gaudi/server/text_generation_server/cli.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/cli.py", "repo_id": "text-generation-inference", "token_count": 5876 }
286
from dataclasses import dataclass from typing import Optional, Tuple, Type, Union, List import torch from text_generation_server.utils.weights import ( Weight, WeightsLoader, UnquantizedWeight, Weights, ) from vllm_hpu_extension.ops import scaled_fp8_quant from vllm_hpu_extension.scales import get_hpu_gaudi2_scale_factor, is_hpu_gaudi2 quant_dtype: torch.dtype = torch.float8_e4m3fn FP8_MAX = torch.finfo(torch.float8_e4m3fn).max if is_hpu_gaudi2(): FP8_MAX = torch.finfo(torch.float8_e4m3fnuz).max def pad_weight(weight, block_size): """Pads a matrix to make its dimensions multiples of block_size.""" M, N = weight.shape[-2:] block_size_m, block_size_n = block_size pad_M = (block_size_m - M % block_size_m) % block_size_m pad_N = (block_size_n - N % block_size_n) % block_size_n if pad_M == 0 and pad_N == 0: return weight, M, N # No padding needed padded_weight = torch.nn.functional.pad( weight, (0, pad_N, 0, pad_M), mode="constant", value=0 ) return padded_weight, M, N # Return original dimensions for unpadding def unpad_weight(weight, original_M, original_N, keep_first_dim=False): """Removes padding from the matrix to restore its original shape.""" if (weight.shape[-2] == original_M) and (weight.shape[-1] == original_N): return weight if keep_first_dim: return weight[:, :original_M, :original_N] else: return weight[:original_M, :original_N] def pad_block_fp8_weight_naive(weight, weight_scale, block_size): assert len(block_size) == 2 block_size_m, block_size_n = block_size weight_scale_m, weight_scale_n = weight_scale.shape[-2:] weight, orig_M, orig_N = pad_weight(weight, block_size) M, N = weight.shape[-2:] assert weight_scale_m == M // block_size_m assert weight_scale_n == N // block_size_n return weight, orig_M, orig_N def dynamic_quant(data, single_scale=False): if single_scale: scale = ((torch.abs(data)).max() + 1e-8) / FP8_MAX else: scale = ((torch.abs(data)).max(dim=-1).values + 1e-8) / FP8_MAX scale = scale.unsqueeze(-1) data_fp8 = torch.ops.hpu.cast_to_fp8_v2( 
data, 1.0 / scale, False, False, torch.float8_e4m3fn )[0] return data_fp8, scale.float() def dequant_block_fp8_weight_naive( weight, weight_scale, block_size, dtype=torch.bfloat16, original_M=None, original_N=None, do_unpad=False, ): if weight_scale is None: return weight assert len(block_size) == 2 weight_shape_len = len(weight.shape) block_size_m, block_size_n = block_size # mul scale if weight_shape_len == 2: weight_scale_m, weight_scale_n = weight_scale.shape weight_scale = weight_scale.view(weight_scale_m, 1, weight_scale_n, 1) weight = weight.view(weight_scale_m, block_size_m, weight_scale_n, block_size_n) if is_hpu_gaudi2(): fake_weight = weight.cpu().to(dtype).to(weight.device) dequant_weight = fake_weight * weight_scale.to(dtype) else: dequant_weight = weight.to(dtype) * weight_scale.to(dtype) dequant_weight = dequant_weight.view( weight_scale_m * block_size_m, weight_scale_n * block_size_n ) keep_first_dim = False elif weight_shape_len == 3: fd, weight_scale_m, weight_scale_n = weight_scale.shape weight_scale = weight_scale.view(fd, weight_scale_m, 1, weight_scale_n, 1) weight = weight.view( fd, weight_scale_m, block_size_m, weight_scale_n, block_size_n ) if is_hpu_gaudi2(): fake_weight = weight.cpu().to(dtype).to(weight.device) dequant_weight = fake_weight * weight_scale.to(dtype) else: dequant_weight = weight.to(dtype) * weight_scale.to(dtype) dequant_weight = dequant_weight.view( fd, weight_scale_m * block_size_m, weight_scale_n * block_size_n ) keep_first_dim = True else: raise ValueError("Only support original weight shape is either 2 or 3") if do_unpad: dequant_weight = unpad_weight( dequant_weight, original_M, original_N, keep_first_dim=keep_first_dim ) return dequant_weight def apply_block_fp8_linear_hpu_dynamic( input: torch.Tensor, weight: torch.Tensor, weight_scale: torch.Tensor, input_scale: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, ) -> torch.Tensor: # View input as 2D matrix for fp8 methods input_2d = input.view(-1, 
input.shape[-1]) output_shape = [*input.shape[:-1], weight.shape[0]] x_fp8, x_scale = dynamic_quant(input_2d) output = torch.ops.hpu.fp8_gemm_v2( x_fp8, False, weight, True, None, torch.bfloat16, x_scale, weight_scale, None, False, ) if bias is not None: output = output + bias return output.to(dtype=input.dtype).view(*output_shape) def get_fp8_linear(force_w8a16: bool = False) -> Type[torch.nn.Module]: """ Return an FP8 linear `Module` that is compatible with the current system. """ # On other systems let Torch decide if the hardware supports FP8. return Fp8Linear def normalize_e4m3fn_to_native_float8( weight: torch.Tensor, weight_scale: torch.Tensor, input_scale: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: return weight, weight_scale, input_scale def per_tensor_dequantize( tensor: torch.Tensor, inv_scale: Union[float, torch.Tensor], dtype: torch.dtype = torch.float16, ) -> torch.Tensor: device = tensor.device dtype = torch.bfloat16 if is_hpu_gaudi2(): # dequant on cpu to avoid nan on gaudi2 tensor = tensor.to("cpu") fake_qweight = tensor.to(dtype).to(device) dq_weight = fake_qweight * inv_scale return dq_weight def requantize_with_max_scale( weight: torch.Tensor, weight_scale: torch.Tensor, logical_widths: int, dtype: torch.dtype, ) -> Tuple[torch.Tensor, torch.Tensor]: # Max scale to be used for requanitzation. 
max_w_scale = weight_scale.max() if is_hpu_gaudi2(): max_w_scale = max_w_scale * get_hpu_gaudi2_scale_factor() start = 0 for idx, logical_width in enumerate(logical_widths): end = start + logical_width weight_dq = per_tensor_dequantize( weight[start:end, :], weight_scale[start:end, :], dtype ) weight[start:end, :], max_w_scale_normalized = fp8_quantize( weight_dq, max_w_scale ) start = end return weight, max_w_scale_normalized def fp8_quantize( weight: torch.Tensor, scale: Optional[torch.Tensor] = None, scale_upper_bound: Optional[torch.Tensor] = None, qdtype: torch.dtype = torch.float8_e4m3fn, scalar: bool = False, ): """ This function returns a reciprocal of the scale, so that a tensor can be unscaled by multiplying it with the returned scale. If a scale is given through the `scale` argument, it must also be a reciprocal (so that scales from an FP8 checkpoint can be used without modification). """ shape = weight.shape qweight, scale = scaled_fp8_quant( weight.reshape(-1, shape[-1]), scale=scale, scale_ub=scale_upper_bound, # TODO: don't do this when we have to use the Torch kernel. 
use_per_token_if_dynamic=not scalar, ) return qweight.reshape(shape), scale class HybridFP8UnquantLoader(WeightsLoader): """Weight loader that loads FP8 and unquantized Torch tensors.""" def __init__( self, activation_scale_ub: Optional[float], to_fp8: bool, weight_block_size: Optional[List[int]] = None, ): self.activation_scale_ub = activation_scale_ub self.to_fp8 = to_fp8 self.weight_block_size = weight_block_size def get_weights(self, weights: "Weights", prefix: str): w = weights.get_tensor(f"{prefix}.weight") if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: scale = weights.get_tensor(f"{prefix}.weight_scale_inv") return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) # FP8 branch scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) scale = scale.reshape(-1).expand(w.shape[0]) logical_widths = [w.shape[0]] w, scale = requantize_with_max_scale( w, scale.unsqueeze(-1).to(weights.device), logical_widths, weights.dtype ) input_scale = None if weights.has_tensor(f"{prefix}.input_scale"): input_scale = ( weights.get_tensor(f"{prefix}.input_scale", to_dtype=False) .reshape(-1) .max() ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_weights_col_packed( self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]], ): w = weights.get_packed_sharded( f"{prefix}.weight", dim=0, block_sizes=block_sizes ) if w.dtype == torch.float8_e4m3fn: # FP8 branch scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) if scale.numel() > 1: scale = weights.get_packed_sharded( f"{prefix}.weight_scale", dim=0, block_sizes=block_sizes, to_dtype=False, ) scale = scale.reshape(-1).expand(w.shape[0]) logical_widths = [w.shape[0]] w, 
scale = requantize_with_max_scale( w, scale.unsqueeze(-1).to(weights.device), logical_widths, weights.dtype ) input_scale = None if weights.has_tensor(f"{prefix}.input_scale"): input_scale = weights.get_tensor( f"{prefix}.input_scale", to_dtype=False ) if input_scale.numel() > 1: input_scale = weights.get_packed_sharded( f"{prefix}.input_scale", dim=0, block_sizes=block_sizes, to_dtype=False, ) input_scale = input_scale.reshape(-1).max() return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_multi_weights_col(self, weights: "Weights", prefixes: List[str], dim: int): # FIXME: Force to_device to false as fp8 weights do not support torch.cat on device yet w = [ weights.get_sharded(f"{p}.weight", dim=0, to_device=False) for p in prefixes ] shapes = [x.shape for x in w] # Concat then send to the device w = torch.cat(w, dim=dim).to(weights.device) # FP8 branch if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: scale = [ weights.get_sharded(f"{p}.weight_scale_inv", dim=0, to_device=False) for p in prefixes ] scale = torch.cat(scale, dim=dim) scale = scale.to(weights.device) return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) scale = [ _load_scalar_or_matrix_scale(weights, f"{p}.weight_scale", shape) for p, shape in zip(prefixes, shapes) ] scale = torch.cat(scale, dim=0).reshape(-1) logical_widths = [x[0] for x in shapes] w, scale = requantize_with_max_scale( w, scale.unsqueeze(-1).to(weights.device), logical_widths, weights.dtype ) input_scale = [ _load_scalar_or_matrix_scale(weights, f"{p}.input_scale", shape) for p, shape in zip(prefixes, shapes) if weights.has_tensor(f"{p}.input_scale") ] assert len(input_scale) == 0 or len(input_scale) == len(prefixes) 
input_scale = ( torch.cat(input_scale, dim=0).reshape(-1).max() if len(input_scale) != 0 else None ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_multi_weights(self, weights: "Weights", prefixes: List[str], dim: int): # FIXME: Force to_device to false as fp8 weights do not support torch.cat on device yet w = [weights.get_tensor(f"{p}.weight", to_device=False) for p in prefixes] shapes = [x.shape for x in w] # Concat then send to the device w = torch.cat(w, dim=dim).to(weights.device) # FP8 branch if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: scale = [ weights.get_tensor(f"{p}.weight_scale_inv", to_device=False) for p in prefixes ] scale = torch.cat(scale, dim=dim) scale = scale.to(weights.device) return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) scale = [ weights.get_tensor(f"{p}.weight_scale", to_dtype=False) .reshape(-1) .expand(shape[0]) for p, shape in zip(prefixes, shapes) ] scale = torch.cat(scale, dim=0).reshape(-1) logical_widths = [x[0] for x in shapes] w, scale = requantize_with_max_scale( w, scale.unsqueeze(-1).to(weights.device), logical_widths, weights.dtype ) input_scale = [ weights.get_tensor(f"{p}.input_scale", to_dtype=False).reshape(-1) for p in prefixes if weights.has_tensor(f"{p}.input_scale") ] assert len(input_scale) == 0 or len(input_scale) == len(prefixes) input_scale = ( torch.cat(input_scale, dim=0).reshape(-1).max() if len(input_scale) != 0 else None ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) def get_weights_row(self, 
weights: "Weights", prefix: str): w = weights.get_sharded(f"{prefix}.weight", dim=1) # FP8 branch if w.dtype == torch.float8_e4m3fn: if self.weight_block_size is not None: # XXX: Yes the weights is named scale_inv, but corresponds to scale it seems. scale = weights.get_sharded(f"{prefix}.weight_scale_inv", dim=1) return Fp8Weight( weight=w, weight_scale=scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, weight_block_size=self.weight_block_size, ) scale = ( weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) .reshape(-1) .expand(w.shape[0]) ) logical_widths = [w.shape[0]] w, scale = requantize_with_max_scale( w, scale.unsqueeze(-1).to(weights.device), logical_widths, weights.dtype ) input_scale = None if weights.has_tensor(f"{prefix}.input_scale"): input_scale = ( weights.get_tensor(f"{prefix}.input_scale", to_dtype=False) .reshape(-1) .max() ) return Fp8Weight( weight=w, weight_scale=scale, input_scale=input_scale, activation_scale_ub=self.activation_scale_ub, dtype=weights.dtype, ) if self.to_fp8: return Fp8Weight(weight=w, dtype=weights.dtype) return UnquantizedWeight(w) @dataclass class Fp8Weight(Weight): weight: torch.Tensor dtype: torch.dtype weight_scale: Optional[torch.Tensor] = None input_scale: Optional[torch.Tensor] = None activation_scale_ub: Optional[float] = None force_w8a16: bool = False weight_block_size: Optional[List[int]] = None def get_linear(self, bias: torch.Tensor): if self.weight_scale is None: return get_fp8_linear(force_w8a16=self.force_w8a16).from_unquant( self.weight, bias, self.dtype ) # This is not checked by the fbgemm kernels, but they require contiguous # memory. Can be non-contiguous when we e.g. expand from scalars. 
self.weight_scale = self.weight_scale.contiguous() return get_fp8_linear(force_w8a16=self.force_w8a16).from_fp8( weight=self.weight, scale=self.weight_scale, dtype=self.dtype, bias=bias, input_scale=self.input_scale, scale_upper_bound=self.activation_scale_ub, weight_block_size=self.weight_block_size, ) class Fp8Linear(torch.nn.Module): _device_identity_cache = {} def __init__( self, qweight: torch.Tensor, scale: torch.Tensor, dtype: torch.dtype, bias: Optional[torch.Tensor] = None, input_scale: Optional[torch.Tensor] = None, scale_upper_bound: Optional[float] = None, weight_block_size: Optional[List[int]] = None, ) -> None: super().__init__() self.dtype = dtype self.qweight = qweight self.scale = scale.float() self.input_scale = input_scale.float() if input_scale is not None else None self.weight_block_size = weight_block_size self.scale_upper_bound = scale_upper_bound self.bias = bias if bias is not None else None @classmethod def from_unquant(cls, weight, bias, dtype): qweight, scale = fp8_quantize(weight, scalar=True) return cls( qweight=qweight, scale=scale, dtype=dtype, bias=bias, input_scale=None, scale_upper_bound=None, ) @classmethod def from_fp8( cls, weight: torch.Tensor, scale: torch.Tensor, dtype: torch.dtype, bias: Optional[torch.Tensor] = None, **kwargs, ) -> "Fp8Linear": input_scale = kwargs.get("input_scale", None) scale_upper_bound = kwargs.get("scale_upper_bound", None) weight_block_size = kwargs.get("weight_block_size", None) if weight_block_size is not None: weight, orig_M, orig_N = pad_block_fp8_weight_naive( weight, scale, weight_block_size ) weight, scale = dynamic_quant( dequant_block_fp8_weight_naive( weight, scale, weight_block_size, original_M=orig_M, original_N=orig_N, do_unpad=True, ) ) scale = scale.squeeze(-1) return cls( qweight=weight, scale=scale, input_scale=input_scale, scale_upper_bound=scale_upper_bound, bias=bias, dtype=dtype, weight_block_size=weight_block_size, ) def forward(self, input: torch.Tensor) -> torch.Tensor: if 
self.weight_block_size is not None or self.input_scale is None: return apply_block_fp8_linear_hpu_dynamic( input, self.qweight, self.scale, self.input_scale, self.bias ) x_fp8 = torch.ops.hpu.cast_to_fp8_v2( input, 1.0 / self.input_scale, False, False, torch.float8_e4m3fn )[0] return torch.ops.hpu.fp8_gemm_v2( A=x_fp8, trans_A=False, B=self.qweight, trans_B=True, D=None, out_dtype=input.dtype, A_scale_inv=self.input_scale, B_scale_inv=self.scale, bias=self.bias, accumulate=False, ) def _load_scalar_or_matrix_scale(weights: Weights, prefix: str, shape: torch.Size): scale = weights.get_tensor(prefix, to_dtype=False) if scale.numel() > 1: scale = weights.get_sharded(prefix, dim=0, to_dtype=False) return scale.reshape(-1).expand(shape[0])
text-generation-inference/backends/gaudi/server/text_generation_server/layers/fp8.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/fp8.py", "repo_id": "text-generation-inference", "token_count": 11380 }
287
import torch
from torch.nn import functional as F
from typing import Iterable, List
from text_generation_server.layers.linear import get_linear, FastLinear

import habana_frameworks.torch as htorch


class LayerConcat(torch.nn.Module):
    """
    Apply multiple layers to the input and concatenate their
    outputs.
    """

    def __init__(self, layers: Iterable[torch.nn.Module], dim: int = -1):
        """
        `dim` is the dimension along which layer outputs are concatenated.
        """
        super().__init__()
        self.layers = layers
        self.dim = dim

    def forward(self, x: torch.Tensor):
        outputs = [layer(x) for layer in self.layers]
        return torch.cat(outputs, self.dim)


class SuperLayer(torch.nn.Module):
    """Thin wrapper that delegates `forward` to a wrapped linear module."""

    def __init__(self, linear):
        super().__init__()
        self.linear = linear

    def forward(self, x):
        return self.linear.forward(x)


class TensorParallelHead(SuperLayer):
    """LM head whose output is gathered across tensor-parallel ranks.

    When `should_gather` is False (single rank, or a full unsharded weight was
    loaded), this behaves exactly like the wrapped linear.
    """

    def __init__(self, linear, process_group, should_gather: bool):
        super().__init__(linear)
        self.process_group = process_group
        self.should_gather = should_gather

    @staticmethod
    def load(config, prefix: str, weights):
        if config.quantize == "exl2":
            try:
                # If the piece and LM head embeddings are shared, we have
                # non-quantized weights...
                weight = weights.get_tensor(f"{prefix}.weight")
            except Exception:
                # ...otherwise they are quantized.
                weight = weights.get_weights_col(prefix)
            should_gather = weights.process_group.size() > 1
        elif weights.process_group.size() > 1:
            try:
                weight = weights.get_sharded(f"{prefix}.weight", dim=0)
                should_gather = True
            except AssertionError:
                # If the vocab size is not divisible by number of shards
                # just load the entire thing.
                weight = weights.get_tensor(f"{prefix}.weight")
                should_gather = False
        else:
            weight = weights.get_tensor(f"{prefix}.weight")
            should_gather = False

        return TensorParallelHead(
            get_linear(weight, bias=None),
            process_group=weights.process_group,
            should_gather=should_gather,
        )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if not self.should_gather:
            return super().forward(input)

        world_size = self.process_group.size()
        # Fast path for plain 2-D inputs on an unquantized FastLinear: do the
        # matmul into a preallocated buffer and gather with the fused collective.
        if len(input.shape) == 2 and isinstance(self.linear, FastLinear):
            out_dim = self.linear.weight.shape[0]

            if input.shape[0] == 1:
                world_out = input.new_empty(1, out_dim * world_size)
                local_out = input.new_empty(1, out_dim)
                gather_input = local_out
            else:
                # Batch > 1: gather along dim 0, so compute into the transposed
                # view and transpose back at the end.
                world_out = input.new_empty(out_dim * world_size, input.shape[0])
                gather_input = input.new_empty(out_dim, input.shape[0])
                local_out = gather_input.T

            torch.mm(input, self.linear.weight.T, out=local_out)
            htorch.core.mark_step()
            torch.distributed.all_gather_into_tensor(
                world_out, gather_input, group=self.process_group
            )

            if input.shape[0] == 1:
                return world_out
            return world_out.T

        # Generic path: run the wrapped linear, then all_gather the per-rank
        # outputs and concatenate on the last dimension.
        output = super().forward(input)
        world_output = [
            torch.empty_like(output) for _ in range(self.process_group.size())
        ]
        htorch.core.mark_step()
        torch.distributed.all_gather(world_output, output, group=self.process_group)
        world_output = torch.cat(world_output, dim=-1)
        return world_output


class TensorParallelColumnLinear(SuperLayer):
    """Column-parallel linear: each rank holds a slice of the output features."""

    @classmethod
    def load_gate_up(cls, config, prefix: str, weights, bias: bool):
        """Specific method when the QKV was joined after the fact"""
        weight = weights.get_weights_col_packed_gate_up(prefix)
        if bias:
            raise NotImplementedError("packed_gate_up only implemented without bias")
        else:
            bias = None
        linear = get_linear(weight, bias)
        return cls(linear)

    @classmethod
    def load_qkv(
        cls,
        config,
        prefix: str,
        weights,
        bias: bool,
        num_heads: int,
        num_key_value_heads: int,
    ):
        """Specific method when the QKV was joined after the fact"""
        weight = weights.get_weights_col_packed_qkv(
            prefix,
            num_heads=num_heads,
            num_key_value_heads=num_key_value_heads,
        )
        if bias:
            raise NotImplementedError("packed_qkv only implemented for baichuan")
        else:
            bias = None
        linear = get_linear(weight, bias)
        return cls(linear)

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        weight = weights.get_weights_col(prefix)
        if bias:
            # Bias is sharded the same way as the output features.
            bias = weights.get_sharded(f"{prefix}.bias", dim=0)
        else:
            bias = None
        linear = get_linear(weight, bias)
        return cls(linear)

    @classmethod
    def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int):
        """Load several column-parallel weights and fuse them into one layer."""
        if config.quantize == "exl2":
            # exl2 weights cannot be concatenated ahead of time; apply each
            # layer separately and concatenate the outputs instead.
            linears = []
            for prefix in prefixes:
                weight = weights.get_weights_col(prefix)
                b = weights.get_tensor(f"{prefix}.bias") if bias else None
                linears.append(get_linear(weight, b))
            linear = LayerConcat(linears)
        else:
            weight = weights.get_multi_weights_col(prefixes, dim=dim)
            if bias:
                b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes]
                bias = torch.cat(b, dim=dim)
            else:
                bias = None
            linear = get_linear(weight, bias)
        return cls(linear)


class TensorParallelRowLinear(SuperLayer):
    """Row-parallel linear: partial results are summed with all_reduce."""

    def __init__(self, linear, process_group):
        super().__init__(linear)
        self.process_group = process_group

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        weight = weights.get_weights_row(prefix)

        if bias and weights.process_group.rank() == 0:
            # Rank is only on the first rank process
            bias = weights.get_tensor(f"{prefix}.bias")
        else:
            bias = None
        return cls(
            get_linear(weight, bias),
            process_group=weights.process_group,
        )

    def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor:
        out = super().forward(input)
        if self.process_group.size() > 1 and reduce:
            # FIXME(kzawora): this is a workaround for a bug in Habana PT bridge
            # occurring when PT_HPU_ENABLE_LAZY_COLLECTIVES=true env var is used
            # (which is required for tensor parallel HPUGraph inference)
            htorch.core.mark_step()
            torch.distributed.all_reduce(out, group=self.process_group)
        return out


class TensorParallelEmbedding(torch.nn.Module):
    """Embedding table sharded over the vocabulary dimension.

    Each rank owns the id range [min_id, max_id); out-of-range ids map to an
    extra zero row, and (optionally) the partial lookups are all_reduced.
    """

    def __init__(self, prefix: str, weights, reduce=True):
        super().__init__()
        weight = weights.get_partial_sharded(f"{prefix}.weight", dim=0)
        num_embeddings = weights.get_shape(f"{prefix}.weight")[0]

        process_group = weights.process_group

        world_size = process_group.size()
        rank = process_group.rank()

        block_size = (num_embeddings + world_size - 1) // world_size
        self.min_id = rank * block_size
        self.max_id = min(num_embeddings, (rank + 1) * block_size)
        self.null_idx = weight.shape[
            0
        ]  # Usually block_size, might be less in non even vocab_size.
        self.process_group = weights.process_group
        self.reduce = reduce

        """Additional 0 entry used for masking"""
        self.weight = torch.nn.Parameter(F.pad(weight, (0, 0, 0, 1)))

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # default all out of bounds values to `self.null_idx` that will then be mapped to 0
        # translate for [0, self.max_id - self.min_id[
        input = torch.where(
            (self.min_id > input) | (input >= self.max_id),
            self.null_idx,
            input - self.min_id,
        )
        out = torch.nn.functional.embedding(input, self.weight)
        if self.reduce and self.process_group.size() > 1:
            # FIXME(kzawora): this is a workaround for a bug in Habana PT bridge
            # occurring when PT_HPU_ENABLE_LAZY_COLLECTIVES=true env var is used
            # (which is required for tensor parallel HPUGraph inference)
            htorch.core.mark_step()
            torch.distributed.all_reduce(out, group=self.process_group)
        return out
text-generation-inference/backends/gaudi/server/text_generation_server/layers/tensor_parallel.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/tensor_parallel.py", "repo_id": "text-generation-inference", "token_count": 4080 }
288
# Origin:   https://github.com/predibase/lorax
# Path:     lorax/server/lorax_server/utils/adapter.py
# License:  Apache License Version 2.0, January 2004

import warnings
import re
from dataclasses import dataclass
from functools import lru_cache
from typing import TYPE_CHECKING, Set, Tuple, Optional, List

from safetensors.torch import load_file
from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer

from text_generation_server.utils.merges.strategies import merge_adapters

from text_generation_server.utils import hub
from text_generation_server.adapters.lora import LoraConfig


if TYPE_CHECKING:
    from text_generation_server.adapters.config import AdapterConfig, ModuleMap


BASE_MODEL_ADAPTER_ID = "__base_model__"


@dataclass
class AdapterInfo:
    # Hub repo id or local identifier of the adapter.
    id: str
    # Optional local path; when set, weights are loaded from disk instead of the Hub.
    path: Optional[str]
    # Optional Hub revision (branch/tag/sha).
    revision: Optional[str] = None


@dataclass
class AdapterParameters:
    adapter_info: Tuple[AdapterInfo]
    weights: Tuple[float]
    # Merge strategy / sign method are placeholders here; interpreted by
    # `merge_adapters` in merges.strategies.
    merge_strategy: NotImplemented
    density: float
    majority_sign_method: NotImplemented


@dataclass
class AdapterSource:
    adapter_id: str
    model_id: str
    revision: str


def parse_lora_adapters(lora_adapters: Optional[str]) -> List[AdapterInfo]:
    """Parse the `LORA_ADAPTERS` CLI/env string into `AdapterInfo` entries.

    Accepted per-adapter syntax: `id`, `id=path`, `id@revision`, `id=path@revision`;
    multiple adapters are comma-separated. Raises ValueError on malformed input.
    """
    if not lora_adapters:
        return []

    adapter_list = []
    for adapter in lora_adapters.split(","):
        adapter = adapter.strip()
        if adapter.count("=") > 1 or adapter.count("@") > 1:
            raise ValueError(f"Invalid LoRA adapter format: {adapter}")
        match = re.match(r"^([^=@]+)(?:=([^@]+))?(?:@(.+))?$", adapter)

        if match:
            adapter_id, path, revision = match.groups()
            adapter_list.append(
                AdapterInfo(id=adapter_id, path=path, revision=revision)
            )
        else:
            raise ValueError(f"Invalid LoRA adapter format: {adapter}")
    return adapter_list


def load_and_merge_adapters(
    model_id: str,
    adapter_parameters: AdapterParameters,
    adapter_index: int,
    weight_names: Tuple[str],
    trust_remote_code: bool = False,
) -> Tuple["ModuleMap", "AdapterConfig", Set[str], PreTrainedTokenizer]:
    """Load one adapter directly, or load-and-merge several into one module map."""
    if len(adapter_parameters.adapter_info) == 1:
        adapter = next(iter(adapter_parameters.adapter_info))
        return load_module_map(
            model_id,
            adapter.revision,
            adapter.id,
            adapter.path,
            weight_names,
            trust_remote_code,
        )

    # Wrap in a hashable container so the lru_cache below can key on it.
    adapter_params = AdapterParametersContainer(adapter_parameters, adapter_index)
    return _load_and_merge(
        model_id,
        adapter_params,
        weight_names,
        trust_remote_code,
    )


@dataclass
class AdapterParametersContainer:
    """Hashable wrapper around AdapterParameters, keyed by adapter_index."""

    adapter_parameters: AdapterParameters
    adapter_index: int

    def __hash__(self) -> int:
        return self.adapter_index


@lru_cache(maxsize=32)
def _load_and_merge(
    model_id: str,
    adapter_params: AdapterParametersContainer,
    weight_names: Tuple[str],
    trust_remote_code: bool = False,
) -> Tuple["ModuleMap", "AdapterConfig", Set[str], PreTrainedTokenizer]:
    """Load every adapter in `adapter_params` and merge them into one adapter.

    Returns the merged module map/config, the union of touched weight names,
    and the first adapter tokenizer found (may be None if none ship one).
    """
    params = adapter_params.adapter_parameters

    adapters_to_merge = []
    merged_weight_names = set()
    tokenizer = None
    for adapter in params.adapter_info:
        if adapter.id == BASE_MODEL_ADAPTER_ID:
            raise ValueError("Base model adapter cannot be merged.")

        (
            module_map,
            adapter_config,
            adapter_weight_names,
            adapter_tokenizer,
        ) = load_module_map(
            model_id,
            adapter.revision,
            adapter.id,
            adapter.path,
            weight_names,
            trust_remote_code,
        )

        adapters_to_merge.append((module_map, adapter_config))
        merged_weight_names = merged_weight_names.union(adapter_weight_names)
        if tokenizer is None:
            tokenizer = adapter_tokenizer

    if len(adapters_to_merge) == 0:
        raise ValueError("No adapters to merge.")

    module_map, adapter_config = merge_adapters(adapters_to_merge, params)

    return module_map, adapter_config, merged_weight_names, tokenizer


def check_architectures(
    model_id: str,
    adapter_id: str,
    adapter_config: "AdapterConfig",
    trust_remote_code: bool = False,
):
    """Warn or raise when an adapter was trained against a different base model.

    Best-effort: any failure to fetch either config is downgraded to a warning.
    """
    try:
        if not adapter_config.base_model_name_or_path:
            # Avoid execution latency caused by the network connection retrying for AutoConfig.from_pretrained(None)
            return

        expected_config = AutoConfig.from_pretrained(
            model_id, trust_remote_code=trust_remote_code
        )
        model_config = AutoConfig.from_pretrained(
            adapter_config.base_model_name_or_path, trust_remote_code=trust_remote_code
        )
    except Exception as e:
        warnings.warn(
            f"Unable to check architecture compatibility for adapter '{adapter_id}' "
            f"against model '{model_id}'. Assuming they are compatible. Error: {e}"
        )
        return

    if model_config.architectures == expected_config.architectures:
        # Same architecture but a different base checkpoint: usable, but warn.
        warnings.warn(
            f"Adapter '{adapter_id}' was not trained on base model '{model_id}'. "
            f"If you encounter issues, use --model-id '{adapter_config.base_model_name_or_path}' instead."
        )
    else:
        # TODO(travis): revisit this when we support clasification heads which will not use CausalLM
        raise ValueError(
            f"Adapter '{adapter_id}' is not compatible with model '{model_id}'. "
            f"Architectures differ: {model_config.architectures} != {expected_config.architectures}. "
            f"Use --model-id '{adapter_config.base_model_name_or_path}' instead."
        )


@lru_cache(maxsize=128)
def load_module_map(
    model_id: str,
    revision: str,
    adapter_id: str,
    adapter_path: Optional[str],
    weight_names: Tuple[str],
    trust_remote_code: bool = False,
) -> Tuple["ModuleMap", "AdapterConfig", Set[str], PreTrainedTokenizer]:
    """Load one adapter's config, weights and (optional) tokenizer.

    Weights come from `adapter_path` when given, otherwise from the Hub cache.
    Raises FileNotFoundError when no safetensors shard is found.
    """
    adapter_config = LoraConfig.load(adapter_path or adapter_id, None)

    if not adapter_path and adapter_config.base_model_name_or_path != model_id:
        check_architectures(model_id, adapter_id, adapter_config, trust_remote_code)

    adapter_filenames = (
        hub._weight_files_from_dir(adapter_path, extension=".safetensors")
        if adapter_path
        else hub._cached_weight_files(
            adapter_id, revision=revision, extension=".safetensors"
        )
    )

    # throw an error if no adapter weights are found
    if not adapter_filenames:
        raise FileNotFoundError(
            f"No adapter weights found for adapter '{adapter_id}' and revision '{revision}'."
        )

    try:
        adapter_tokenizer = AutoTokenizer.from_pretrained(
            adapter_config.config_path,
            trust_remote_code=trust_remote_code,
        )
    except Exception:
        # Adapter does not have a tokenizer, so fallback to base model tokenizer
        adapter_tokenizer = None

    # load adapter weights from all shards (should have relatively small memory footprint)
    adapter_weights = {}
    for filename in adapter_filenames:
        adapter_weights.update(load_file(filename))

    # map the model weights to the relevant adapter weights (LoRA A and B matrices)
    module_map, adapter_weight_names = adapter_config.map_weights_for_model(
        adapter_weights, weight_names
    )
    return module_map, adapter_config, adapter_weight_names, adapter_tokenizer


def get_attn_weights(i, layer):
    """Map attention projections of decoder layer `i` to (key -> (name, module))."""
    qkv = layer.self_attn.query_key_value
    weights = {}

    for k in ["q", "k", "v"]:
        key = (i, f"{k}_proj")
        value = (f"model.layers.{i}.self_attn.{k}_proj", qkv)
        weights[key] = value

    # also add the qkv_proj weight for the adapter
    weights[(i, "qkv_proj")] = (
        f"model.layers.{i}.self_attn.qkv_proj",
        qkv,
    )

    weights[(i, "o_proj")] = (
        f"model.layers.{i}.self_attn.o_proj",
        layer.self_attn.o_proj,
    )

    return weights


def get_mlp_weights(i, layer):
    """Map MLP projections of decoder layer `i`, handling fused and split layouts."""
    weights = {}
    if hasattr(layer, "mlp"):
        mlp = layer.mlp
        if hasattr(mlp, "gate_up_proj"):
            # handle combined gate_up_proj (e.g., for some LLaMA variants)
            weights.update(
                {
                    (i, "gate_proj"): (
                        f"model.layers.{i}.mlp.gate_proj",
                        mlp.gate_up_proj,
                    ),
                    (i, "up_proj"): (f"model.layers.{i}.mlp.up_proj", mlp.gate_up_proj),
                }
            )
        else:
            # handle separate gate_proj, up_proj, and down_proj (e.g., for Gemma)
            if hasattr(mlp, "gate_proj"):
                weights[(i, "gate_proj")] = (
                    f"model.layers.{i}.mlp.gate_proj",
                    mlp.gate_proj,
                )
            if hasattr(mlp, "up_proj"):
                weights[(i, "up_proj")] = (f"model.layers.{i}.mlp.up_proj", mlp.up_proj)

        if hasattr(mlp, "down_proj"):
            weights[(i, "down_proj")] = (
                f"model.layers.{i}.mlp.down_proj",
                mlp.down_proj,
            )

    return weights


# build_layer_weight_lookup creates a mapping of model layers to their corresponding
# weight tensors and paths. It builds a dictionary that maps layer identifiers to tuples
# containing the weight tensor path and the actual layer object. This mapping is needed
# for the lora adapter to know which weights to update when applying the adapter.
def build_layer_weight_lookup(model):
    # Multimodal wrappers nest the decoder under language_model/text_model.
    if hasattr(model, "language_model"):
        m = model.language_model.model
    elif hasattr(model, "text_model"):
        m = model.text_model.model
    else:
        m = model.model

    layer_weights = {}

    for i, layer in enumerate(m.layers):
        attn_weights = get_attn_weights(i, layer)
        mlp_weights = get_mlp_weights(i, layer)

        layer_weights.update(attn_weights)
        layer_weights.update(mlp_weights)

    lm_head = None
    if hasattr(m, "lm_head"):
        lm_head = m.lm_head
    elif hasattr(model, "lm_head"):
        lm_head = model.lm_head

    if lm_head:
        layer_weights[(0, "lm_head")] = ("lm_head", lm_head)

    return layer_weights
text-generation-inference/backends/gaudi/server/text_generation_server/utils/adapter.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/adapter.py", "repo_id": "text-generation-inference", "token_count": 4488 }
289
# Origin:   https://github.com/predibase/lorax
# Path:     lorax/server/lorax_server/utils/sgmv.py
# License:  Apache License Version 2.0, January 2004

import os
import warnings
from functools import lru_cache
from typing import List, Tuple

import torch
import torch.nn.functional as F

try:
    import punica_kernels as _kernels

    HAS_SGMV = not bool(os.environ.get("DISABLE_SGMV", ""))
except ImportError:
    warnings.warn("Could not import SGMV kernel from Punica, falling back to loop.")
    _kernels = None
    HAS_SGMV = False


MIN_SGMV_RANK = 8
MIN_RANK_CUSTOM = 16
MAX_RANK_CUSTOM = 128
SGMV_BLOCK_SIZE = 16
BGMV_MAX_RANK = 64


def has_sgmv() -> bool:
    """Whether the Punica SGMV kernels are importable and not disabled."""
    return HAS_SGMV


def pad_rank(t: torch.Tensor, dim: int, world_size: int) -> torch.Tensor:
    """Pad a tensor to the minimum rank for SGMV and the nearest multiple of the SGMV block size."""
    if not has_sgmv():
        return t

    # tensor parallelism will result in effective rank being divided by world_size,
    # so we need to scale the min rank to offset that effect
    min_rank = MIN_SGMV_RANK * world_size

    # if we're at or below the min rank, pad up to the min rank
    # otherwise, pad to the nearest multiple of the block size
    current_rank = t.size(dim)
    target_rank = (
        min_rank
        if current_rank <= min_rank
        else (current_rank + SGMV_BLOCK_SIZE - 1) // SGMV_BLOCK_SIZE * SGMV_BLOCK_SIZE
    )
    if current_rank == target_rank:
        return t

    pad_size = target_rank - current_rank

    # see complicatd pad syntax here: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
    pad = [0, 0] * t.dim()
    pad[(t.dim() - dim - 1) * 2 + 1] = pad_size
    pad = tuple(pad)

    return F.pad(t, pad, mode="constant", value=0.0)


def use_cutlass_shrink(lora_rank: int) -> bool:
    """Ranks below the custom-kernel minimum must use the cutlass shrink path."""
    return lora_rank < MIN_RANK_CUSTOM


def orient_for_rank(t: torch.Tensor, rank: int) -> torch.Tensor:
    """Transpose the LoRA A matrix when the custom shrink kernel will be used."""
    if MIN_RANK_CUSTOM <= rank <= MAX_RANK_CUSTOM:
        return t.transpose(0, 1)
    return t


# Source: https://github.com/punica-ai/punica/blob/master/src/punica/ops/__init__.py
def add_lora_sgmv_cutlass(
    y: torch.Tensor,
    x: torch.Tensor,
    wa_ptr: torch.Tensor,
    wb_ptr: torch.Tensor,
    s_start: torch.Tensor,
    s_end: torch.Tensor,
    layer_idx: int,
    lora_rank: int,
):
    """
    Semantics:
      y[s[i]:s[i+1]] += x[s[i]:s[i+1]] @ deref(wa_ptr[i]).T @ deref(wb_ptr[i])

    Args:
      y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
      x: Shape: `[B, H1]`. Input vectors.
      wa_ptr: Shape: `[S]`. DType: torch.int64. Pointer to the weight matrices.\
        Weight matrix shape: `[num_layers, R, H1]`.
      wb_ptr: Shape: `[S]`. DType: torch.int64. Pointer to the weight matrices.\
        Weight matrix shape: `[num_layers, R, H2]`.
      s_start: Shape: `[S]`, DType: torch.int32. Indptr of the weight matrices start indices.
      s_end: Shape: `[S]`, DType: torch.int32. Indptr of the weight matrices end indices.
      layer_idx: Layer index of the weight matrices.
    """
    if lora_rank < MIN_RANK_CUSTOM or lora_rank > MAX_RANK_CUSTOM:
        # Custom SGMV shrink only supports rank 16, 32, 64, 128
        _add_lora_sgmv_cutlass_legacy(
            y, x, wa_ptr, wb_ptr, s_start, s_end, layer_idx, lora_rank
        )
        return

    tmp1 = torch.empty((8 * 1024 * 1024,), dtype=torch.uint8, device=x.device)
    tmp2_size = _kernels.sgmv_cutlass_tmp_size(wa_ptr.size(0))
    tmp2 = torch.empty((tmp2_size,), dtype=torch.uint8, device=x.device)
    v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device)
    _kernels.sgmv_shrink(v, x, wa_ptr, s_start, s_end, tmp1, layer_idx)
    _kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp2, layer_idx)


def _add_lora_sgmv_cutlass_legacy(
    y: torch.Tensor,
    x: torch.Tensor,
    wa_ptr: torch.Tensor,
    wb_ptr: torch.Tensor,
    s_start: torch.IntTensor,
    s_end: torch.IntTensor,
    layer_idx: int,
    lora_rank: int,
):
    """Fallback for ranks outside [MIN_RANK_CUSTOM, MAX_RANK_CUSTOM]: cutlass for both GEMMs."""
    tmp_size = _kernels.sgmv_cutlass_tmp_size(wa_ptr.size(0))
    tmp = torch.empty((tmp_size,), dtype=torch.uint8, device=x.device)
    v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device)
    _kernels.sgmv_cutlass(v, x, wa_ptr, s_start, s_end, tmp, layer_idx)
    _kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp, layer_idx)


@lru_cache(maxsize=1)
def get_tmp_tensor(device: torch.device) -> torch.Tensor:
    """Fixed 8 MiB scratch buffer used by the custom shrink kernel."""
    return torch.empty((8 * 1024 * 1024,), dtype=torch.uint8, device=device)


@lru_cache(maxsize=32)
def get_tmp_tensor_for_size(size: int, device: torch.device) -> torch.Tensor:
    """Scratch buffer sized by the Punica kernel (requires punica_kernels)."""
    tmp_size = _kernels.sgmv_cutlass_tmp_size(size)
    return torch.empty((tmp_size,), dtype=torch.uint8, device=device)


def get_tmp_tensor_for_size_no_kernels(size: int, device: torch.device) -> torch.Tensor:
    """Scratch buffer for the loop fallback; no kernel query needed."""
    return torch.empty((size,), dtype=torch.uint8, device=device)


def get_tmp_expand_size(size: int) -> int:
    return _kernels.sgmv_cutlass_tmp_size(size)


def get_tmp_tensors(
    nsegments: int, lora_rank: int, device: torch.device
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Return the (shrink, expand) scratch buffers for the given configuration.

    BUGFIX: the no-kernel fallback previously called `get_tmp_tensor_for_size`,
    which dereferences `_kernels` (None when Punica is unavailable) and raised
    AttributeError. Use the dedicated no-kernels helper instead.
    """
    use_cutlass = use_cutlass_shrink(lora_rank) and has_sgmv()
    has_sgmv_available = has_sgmv()

    if use_cutlass:
        tmp = get_tmp_tensor_for_size(nsegments, device)
        return tmp, tmp
    elif has_sgmv_available:
        return get_tmp_tensor(device), get_tmp_tensor_for_size(nsegments, device)
    else:
        tmp = get_tmp_tensor_for_size_no_kernels(nsegments, device)
        return tmp, tmp


def lora_a_sgmv_cutlass(
    x: torch.Tensor,
    tmp: torch.Tensor,
    wa_ptr: torch.Tensor,
    s_start: torch.IntTensor,
    s_end: torch.IntTensor,
    layer_idx: int,
    lora_rank: int,
) -> torch.Tensor:
    """Shrink step (x @ A.T) of the split SGMV; returns the rank-sized intermediate."""
    v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device)
    if MIN_RANK_CUSTOM <= lora_rank <= MAX_RANK_CUSTOM:
        _kernels.sgmv_shrink(v, x, wa_ptr, s_start, s_end, tmp, layer_idx)
    else:
        _kernels.sgmv_cutlass(v, x, wa_ptr, s_start, s_end, tmp, layer_idx)
    return v


def lora_b_sgmv_cutlass(
    y: torch.Tensor,
    v: torch.Tensor,
    tmp: torch.Tensor,
    wb_ptr: torch.Tensor,
    s_start: torch.IntTensor,
    s_end: torch.IntTensor,
    layer_idx: int,
):
    """Expand step (v @ B) of the split SGMV; accumulates into y in-place."""
    _kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp, layer_idx)


"""
Semantics:
    y[i] += (
        x[i].unsqueeze(0)
        @ wa_T_all[indices[i], layer_idx, :, :].transpose(-1, -2)
        @ wb_T_all[indices[i], layer_idx, :, :].transpose(-1, -2)
        * scale
    ).squeeze(0)

Args:
    y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
    v: Shape: `[B, R]`. Temporary vector.
    x: Shape: `[B, H1]`. Input vectors.
    wa_T_all: Shape: `[None, L, R, H1]`. All of the transposed LoRA A matrices.
    wb_T_all: Shape: `[None, L, H2, R]`. All of the transposed LoRA B matrices.
    indicies: Shape: `[B]`. Indices of the LoRA weights.
    layer_idx: Layer index of LoRA weights.
    scale: Scaling factor.
"""


def add_lora_a_bgmv(
    v: torch.Tensor,
    x: torch.Tensor,
    wa_T_all: torch.Tensor,
    indicies: torch.LongTensor,
    layer_idx: int,
):
    _kernels.dispatch_bgmv(v, x, wa_T_all, indicies, layer_idx, 1.0)


def add_lora_b_bgmv(
    y: torch.Tensor,
    v: torch.Tensor,
    wb_T_all: torch.Tensor,
    indicies: torch.LongTensor,
    layer_idx: int,
):
    _kernels.dispatch_bgmv(y, v, wb_T_all, indicies, layer_idx, 1.0)


def segmented_matmul(
    y: torch.Tensor,
    x: torch.Tensor,
    w: List[torch.Tensor],
    b: List[torch.Tensor],
    s_start: torch.IntTensor,
    s_end: torch.IntTensor,
):
    """Pure-PyTorch fallback: per-segment F.linear writes into y in-place."""
    for i in range(len(w)):
        if s_end[i] - s_start[i] <= 0:
            continue
        xi = x[s_start[i] : s_end[i]]
        wi = w[i]
        bi = b[i]
        y[s_start[i] : s_end[i]] = F.linear(xi, wi, bi)
text-generation-inference/backends/gaudi/server/text_generation_server/utils/sgmv.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/sgmv.py", "repo_id": "text-generation-inference", "token_count": 3651 }
290
use crate::llamacpp; use std::ffi::CString; #[repr(u32)] #[derive(Debug, Clone, Copy)] pub enum QuantizeType { MostlyQ4_0 = 2, } pub fn model( input_path: &str, output_path: &str, ftype: QuantizeType, n_threads: usize, ) -> Result<(), String> { let c_input_path = CString::new(input_path).map_err(|e| format!("Failed to convert input path: {}", e))?; let c_output_path = CString::new(output_path).map_err(|e| format!("Failed to convert output path: {}", e))?; let result = unsafe { let mut params = llamacpp::model_quantize_default_params(); params.nthread = n_threads as _; params.ftype = ftype as _; params.quantize_output_tensor = true; llamacpp::model_quantize(c_input_path.as_ptr(), c_output_path.as_ptr(), &params) }; if result == 0 { Ok(()) } else { Err(format!("Quantization failed, error code: {}", result)) } }
text-generation-inference/backends/llamacpp/src/quantize.rs/0
{ "file_path": "text-generation-inference/backends/llamacpp/src/quantize.rs", "repo_id": "text-generation-inference", "token_count": 419 }
291
from argparse import ArgumentParser from huggingface_hub import HfApi def main(): parser = ArgumentParser() parser.add_argument("--yes", action="store_true", default=False) args = parser.parse_args() api = HfApi() models = api.list_models(search="optimum-internal-testing/neuron-tgi-testing") for model in models: if args.yes: delete = True else: answer = input(f"Do you want to delete {model.id} [y/N] ?") delete = answer == "y" if delete: api.delete_repo(model.id) print(f"Deleted {model.id}.") if __name__ == "__main__": main()
text-generation-inference/backends/neuron/tests/prune_test_models.py/0
{ "file_path": "text-generation-inference/backends/neuron/tests/prune_test_models.py", "repo_id": "text-generation-inference", "token_count": 285 }
292
use cxx_build::CFG;
use pkg_config;
use std::env;
use std::env::consts::ARCH;
use std::path::{absolute, PathBuf};
use std::sync::LazyLock;

// Extra static libraries built by the CMake step that the FFI layer links against.
const ADDITIONAL_BACKEND_LINK_LIBRARIES: [&str; 1] = ["spdlog"];
const CUDA_ARCH_LIST: Option<&str> = option_env!("CUDA_ARCH_LIST");
const CUDA_REQUIRED_VERSION: &str = "12.8";
const MPI_REQUIRED_VERSION: &str = "4.1";
const INSTALL_PREFIX: Option<&str> = option_env!("CMAKE_INSTALL_PREFIX");
const TENSORRT_ROOT_DIR: Option<&str> = option_env!("TENSORRT_ROOT_DIR");
const NCCL_ROOT_DIR: Option<&str> = option_env!("NCCL_ROOT_DIR");

// True when building inside GitHub Actions with sccache enabled; turns on
// tests and sanitizers so CI exercises the stricter configuration.
const IS_GHA_BUILD: LazyLock<bool> = LazyLock::new(|| {
    option_env!("SCCACHE_GHA_ENABLED").map_or(false, |value| match value.to_lowercase().as_str() {
        "on" => true,
        "true" => true,
        "1" => true,
        _ => false,
    })
});

// Dependencies
const BACKEND_DEPS: &str = "tgi_trtllm_backend_impl";
const CUDA_TRANSITIVE_DEPS: [&str; 4] = ["cuda", "cudart", "cublas", "nvidia-ml"];
const TENSORRT_LLM_TRANSITIVE_DEPS: [(&str, &str); 5] = [
    ("dylib", "tensorrt_llm"),
    ("dylib", "tensorrt_llm_nvrtc_wrapper"),
    ("dylib", "nvinfer_plugin_tensorrt_llm"),
    ("dylib", "decoder_attention_0"),
    ("dylib", "decoder_attention_1"),
];

// Probe a library via pkg-config, first by bare name, then by `name-version`.
macro_rules! probe {
    ($name: expr, $version: expr) => {
        if let Err(_) = pkg_config::probe_library($name) {
            pkg_config::probe_library(&format!("{}-{}", $name, $version))
                .expect(&format!("Failed to locate {}", $name));
        }
    };
}

// Map a boolean switch to one of two CMake-style flag strings (e.g. "ON"/"OFF").
fn get_compiler_flag(
    switch: bool,
    true_case: &'static str,
    false_case: &'static str,
) -> &'static str {
    match switch {
        true => true_case,
        false => false_case,
    }
}

// Derive the CMAKE_LIBRARY_ARCHITECTURE triple from the cargo target;
// panics on unsupported OS/arch/ABI combinations.
fn get_library_architecture() -> &'static str {
    let os = env::var("CARGO_CFG_TARGET_OS").unwrap();
    let arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
    let env = env::var("CARGO_CFG_TARGET_ENV").unwrap();

    match os.as_str() {
        "linux" => {
            if env != "gnu" {
                panic!("unsupported linux ABI {env}, only 'gnu' is supported")
            }

            match arch.as_str() {
                "x86_64" => "x86_64-linux-gnu",
                "aarch64" => "aarch64-linux-gnu",
                _ => panic!("unsupported linux architecture {arch}"),
            }
        }
        "windows" => {
            if env != "msvc" {
                panic!("unsupported windows ABI {env}, only 'msvc' is supported")
            }

            match arch.as_str() {
                "x86_64" => "x86_64-windows-msvc",
                _ => panic!("unsupported windows architecture {arch}"),
            }
        }
        _ => panic!("unsupported OS {os}"),
    }
}

// Configure and run the CMake build of the C++ backend, then emit the link
// search paths for cargo. Returns (install prefix, CMake _deps folder).
fn build_backend(is_debug: bool, opt_level: &str, out_dir: &PathBuf) -> (PathBuf, PathBuf) {
    // Build the backend implementation through CMake
    let install_path = INSTALL_PREFIX.unwrap_or("/usr/local/tgi");
    let tensorrt_path = TENSORRT_ROOT_DIR.unwrap_or("/usr/local/tensorrt");
    let cuda_arch_list = CUDA_ARCH_LIST.unwrap_or("75-real;80-real;86-real;89-real;90-real");

    let mut install_path = PathBuf::from(install_path);
    if !install_path.is_absolute() {
        install_path = absolute(out_dir).expect("cannot happen").join(install_path);
    }

    let mut config = cmake::Config::new(".");
    config
        .uses_cxx11()
        .generator("Ninja")
        .profile(match is_debug {
            true => "Debug",
            false => "Release",
        })
        .env("OPT_LEVEL", opt_level)
        .define("CMAKE_INSTALL_PREFIX", &install_path)
        .define("CMAKE_CUDA_COMPILER", "/usr/local/cuda/bin/nvcc")
        .define("CMAKE_LIBRARY_ARCHITECTURE", get_library_architecture())
        .define("TGI_TRTLLM_BACKEND_TARGET_CUDA_ARCH_LIST", cuda_arch_list)
        .define(
            "TGI_TRTLLM_BACKEND_DEBUG",
            get_compiler_flag(is_debug, "ON", "OFF"),
        )
        .define("TGI_TRTLLM_BACKEND_TRT_ROOT", tensorrt_path);

    // Tests are built in debug and on CI.
    if is_debug || *IS_GHA_BUILD {
        config.define("TGI_TRTLLM_BACKEND_BUILD_TESTS", "ON");
    }

    if option_env!("USE_LLD_LINKER").is_some() {
        println!("cargo:warning=Using lld linker");
        config.define("TGI_TRTLLM_BACKEND_BUILD_USE_LLD", "ON");
    }

    if (is_debug && option_env!("ENABLE_ASAN").is_some()) || *IS_GHA_BUILD {
        println!("cargo:warning=Enabling Address Sanitizer");
        config.define("TGI_TRTLLM_BACKEND_ENABLE_ASAN", "ON");
    }

    if (is_debug && option_env!("ENABLE_UBSAN").is_some()) || *IS_GHA_BUILD {
        println!("cargo:warning=Enabling Undefined Sanitizer");
        config.define("TGI_TRTLLM_BACKEND_ENABLE_UBSAN", "ON");
    }

    if let Some(nvcc_host_compiler) = option_env!("CMAKE_CUDA_HOST_COMPILER") {
        config.define("CMAKE_CUDA_HOST_COMPILER", nvcc_host_compiler);
    }

    // Route all compilers through the caching wrapper (e.g. sccache) when set.
    if let Some(wrapper) = option_env!("RUSTC_WRAPPER") {
        println!("cargo:warning=Using caching tool: {wrapper}");
        config.define("CMAKE_C_COMPILER_LAUNCHER", wrapper);
        config.define("CMAKE_CXX_COMPILER_LAUNCHER", wrapper);
        config.define("CMAKE_CUDA_COMPILER_LAUNCHER", wrapper);
    }

    // Allow to override which Python to use ...
    if let Some(python3) = option_env!("Python3_EXECUTABLE") {
        config.define("Python3_EXECUTABLE", python3);
    }

    config.build();

    // Additional transitive CMake dependencies
    let deps_folder = out_dir.join("build").join("_deps");
    for dependency in ADDITIONAL_BACKEND_LINK_LIBRARIES {
        // Debug builds of these CMake deps carry a `d` suffix (e.g. spdlogd).
        let dep_name = match is_debug {
            true => format!("{}d", dependency),
            false => String::from(dependency),
        };
        let dep_path = deps_folder.join(format!("{}-build", dependency));
        println!("cargo:rustc-link-search={}", dep_path.display());
        println!("cargo:rustc-link-lib=static={}", dep_name);
    }

    // Emit linkage information from the artifacts we just built
    for path in ["lib", "lib64"] {
        let install_lib_path = install_path.join(path);
        println!(
            r"cargo:warning=Adding link search path: {}",
            install_lib_path.display()
        );
        println!(r"cargo:rustc-link-search={}", install_lib_path.display());
    }

    (PathBuf::from(install_path), deps_folder)
}

// Compile the cxx FFI bridge against the headers pulled in by the CMake build
// and register the rerun-if-changed triggers for the C++ sources.
fn build_ffi_layer(deps_folder: &PathBuf, is_debug: bool) {
    CFG.include_prefix = "backends/trtllm";

    cxx_build::bridge("src/lib.rs")
        .static_flag(true)
        .std("c++23")
        .include(deps_folder.join("spdlog-src").join("include"))
        .include(deps_folder.join("json-src").join("include"))
        .include(deps_folder.join("trtllm-src").join("cpp").join("include"))
        .include("/usr/local/cuda/include")
        .include("/usr/local/tensorrt/include")
        .include("csrc/")
        .file("csrc/ffi.hpp")
        .define(
            "TGI_TRTLLM_BACKEND_DEBUG",
            get_compiler_flag(is_debug, "ON", "OFF"),
        )
        .compile("tgi_trtllm_backend");

    println!("cargo:rerun-if-changed=CMakeLists.txt");
    println!("cargo:rerun-if-changed=cmake/trtllm.cmake");
    println!("cargo:rerun-if-changed=cmake/json.cmake");
    println!("cargo:rerun-if-changed=cmake/spdlog.cmake");
    println!("cargo:rerun-if-changed=csrc/backend.hpp");
    println!("cargo:rerun-if-changed=csrc/backend.cpp");
    println!("cargo:rerun-if-changed=csrc/hardware.hpp");
    println!("cargo:rerun-if-changed=csrc/ffi.hpp");
}

fn main() {
    // Misc variables
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    let build_profile = env::var("PROFILE").unwrap();
    let (is_debug, opt_level) = match build_profile.as_ref() {
        "debug" => (true, "0"),
        "dev" => (true, "0"),
        _ => (false, "3"),
    };

    // Build the backend
    let (_backend_path, deps_folder) = build_backend(is_debug, opt_level, &out_dir);

    // Build the FFI layer calling the backend above
    build_ffi_layer(&deps_folder, is_debug);

    // Emit linkage search path
    probe!("ompi", MPI_REQUIRED_VERSION);

    // Probe CUDA & co. with pkg-config
    CUDA_TRANSITIVE_DEPS.iter().for_each(|name| {
        probe!(name, CUDA_REQUIRED_VERSION);
    });

    // NCCL is slightly trickier because it might not have a pkgconfig installed
    let nccl_library_path_default = format!("/usr/local/{}-linux-gnu", ARCH);
    let nccl_library_path = NCCL_ROOT_DIR.unwrap_or(&nccl_library_path_default);
    println!(r"cargo:rustc-link-search=native={}", nccl_library_path);
    println!("cargo:rustc-link-lib=dylib=nccl");

    // TensorRT
    let tensort_library_path = TENSORRT_ROOT_DIR.unwrap_or("/usr/local/tensorrt/lib");
    println!(r"cargo:rustc-link-search=native={}", tensort_library_path);
    println!("cargo:rustc-link-lib=dylib=nvinfer");

    // TensorRT-LLM
    TENSORRT_LLM_TRANSITIVE_DEPS
        .iter()
        .for_each(|(link_type, name)| {
            println!("cargo:rustc-link-lib={}={}", link_type, name);
        });

    // Backend
    println!("cargo:rustc-link-lib=static={}", &BACKEND_DEPS);
}
text-generation-inference/backends/trtllm/build.rs/0
{ "file_path": "text-generation-inference/backends/trtllm/build.rs", "repo_id": "text-generation-inference", "token_count": 4237 }
293
// // Created by mfuntowicz on 12/3/24. // #include <catch2/catch_all.hpp> #include <nlohmann/json.hpp> #include <tensorrt_llm/executor/executor.h> #include "backend.hpp" using namespace huggingface::tgi::backends::trtllm; TEST_CASE("parse generation_config.json all set", "[generation_config_t]") { const json config_j = {{"temperature", 0.6}, {"top_p", 0.95}, {"eos_token_id", {1, 2, 3}}}; const auto generation_config = generation_config_t(config_j); REQUIRE_THAT(generation_config.temperature, Catch::Matchers::WithinAbs(0.6, 1e-6)); REQUIRE_THAT(generation_config.top_p, Catch::Matchers::WithinAbs(0.95, 1e-6)); // Stop words REQUIRE_FALSE(generation_config.stop_words.empty()); REQUIRE(generation_config.stop_words.size() == config_j["/eos_token_id"_json_pointer].size()); for (auto [lhs, rhs]: std::views::zip(generation_config.stop_words, std::list<std::vector<int32_t>>{{1}, {2}, {3}})) { // Currently we do not support multi-tokens stop words REQUIRE(lhs.size() == 1); REQUIRE(rhs.size() == 1); REQUIRE_THAT(lhs, Catch::Matchers::UnorderedEquals(rhs)); } } TEST_CASE("parse generation_config.json default", "[generation_config_t]") { const json config_j = {{"eos_token_id", {1, 2, 3}}}; const auto generation_config = generation_config_t(config_j); REQUIRE_THAT(generation_config.temperature, Catch::Matchers::WithinAbs(1.0, 1e-6)); REQUIRE_THAT(generation_config.top_p, Catch::Matchers::WithinAbs(1.0, 1e-6)); REQUIRE_FALSE(generation_config.stop_words.empty()); REQUIRE(generation_config.stop_words.size() == config_j["/eos_token_id"_json_pointer].size()); for (auto [lhs, rhs]: std::views::zip(generation_config.stop_words, std::list<std::vector<int32_t>>{{1}, {2}, {3}})) { // Currently we do not support multi-tokens stop words REQUIRE(lhs.size() == 1); REQUIRE(rhs.size() == 1); REQUIRE_THAT(lhs, Catch::Matchers::UnorderedEquals(rhs)); } } TEST_CASE("parse generation_config.json empty", "[generation_config_t]") { const json config_j = {{"eos_token_id", {}}}; const auto 
generation_config = generation_config_t(config_j); REQUIRE_THAT(generation_config.temperature, Catch::Matchers::WithinAbs(1.0, 1e-6)); REQUIRE_THAT(generation_config.top_p, Catch::Matchers::WithinAbs(1.0, 1e-6)); REQUIRE(generation_config.stop_words.empty()); const json config_j2 = {}; const auto generation_config2 = generation_config_t(config_j); REQUIRE_THAT(generation_config2.temperature, Catch::Matchers::WithinAbs(1.0, 1e-6)); REQUIRE_THAT(generation_config2.top_p, Catch::Matchers::WithinAbs(1.0, 1e-6)); REQUIRE(generation_config2.stop_words.empty()); } TEST_CASE("parallel_config single", "[backend_workspace_t]") { // Generate temporary folder const auto tmp_p = std::filesystem::temp_directory_path(); const auto config_p = tmp_p / "config.json"; const auto generation_config_p = tmp_p / "generation_config.json"; // Generate content std::ofstream o_config(config_p); o_config << R"({"pretrained_config": {"mapping": {"world_size": 2}}})"_json; o_config.close(); std::ofstream o_generation_config(generation_config_p); o_generation_config << R"({"eos_token_id": []})"_json; o_generation_config.close(); const auto workspace = backend_workspace_t(tmp_p.generic_string(), tmp_p.generic_string()); const auto parallel = workspace.parallel_config(); REQUIRE(parallel.getCommunicationMode() == tle::CommunicationMode::kORCHESTRATOR); REQUIRE(parallel.getCommunicationType() == tle::CommunicationType::kMPI); std::filesystem::remove(config_p); std::filesystem::remove(generation_config_p); } TEST_CASE("parallel_config multi", "[backend_workspace_t]") { // Generate temporary folder const auto tmp_p = std::filesystem::temp_directory_path(); const auto config_p = tmp_p / "config.json"; const auto generation_config_p = tmp_p / "generation_config.json"; // Generate content std::ofstream o_config(config_p); o_config << R"({"pretrained_config": {"mapping": {"world_size": 1}}})"_json; o_config.close(); std::ofstream o_generation_config(generation_config_p); o_generation_config << 
R"({"eos_token_id": []})"_json; o_generation_config.close(); const auto workspace = backend_workspace_t(tmp_p.generic_string(), tmp_p.generic_string()); const auto parallel = workspace.parallel_config(); REQUIRE(parallel.getCommunicationMode() == tle::CommunicationMode::kLEADER); REQUIRE(parallel.getCommunicationType() == tle::CommunicationType::kMPI); std::filesystem::remove(config_p); std::filesystem::remove(generation_config_p); } TEST_CASE("executor_config", "[backend_workspace_t]") { } TEST_CASE("sampling_params_t to tle::SamplingConfig", "[backend_t]") { const sampling_params_t params = {40, 0.95, 0.9, 1.0, 0.6, 2014}; const auto config = static_cast<tle::SamplingConfig>(params); REQUIRE(config.getTopK().has_value()); REQUIRE(config.getTopK().value() == params.top_k); REQUIRE(config.getSeed().has_value()); REQUIRE(config.getSeed().value() == params.seed); REQUIRE(config.getTopP().has_value()); REQUIRE_THAT(*config.getTopP(), Catch::Matchers::WithinAbs(params.top_p, 1e-6f)); REQUIRE(config.getRepetitionPenalty().has_value()); REQUIRE_THAT(*config.getRepetitionPenalty(), Catch::Matchers::WithinAbs(params.repetition_penalty, 1e-6f)); REQUIRE(config.getFrequencyPenalty().has_value()); REQUIRE_THAT(*config.getFrequencyPenalty(), Catch::Matchers::WithinAbs(params.frequency_penalty, 1e-6f)); REQUIRE(config.getTemperature().has_value()); REQUIRE_THAT(*config.getTemperature(), Catch::Matchers::WithinAbs(params.temperature, 1e-6f)); }
text-generation-inference/backends/trtllm/tests/test_backend.cpp/0
{ "file_path": "text-generation-inference/backends/trtllm/tests/test_backend.cpp", "repo_id": "text-generation-inference", "token_count": 2696 }
294
/// Single shard Client use crate::client::{pb, Chunk}; use crate::client::{ClientError, Result, WARMUP_IMAGE_BASE64}; use base64::engine::general_purpose::STANDARD; use base64::Engine; use grpc_metadata::InjectTelemetryContext; use pb::generate::v3::text_generation_service_client::TextGenerationServiceClient; use pb::generate::v3::*; use std::cmp::min; use std::time::Duration; use tonic::transport::{Channel, Uri}; use tracing::instrument; /// Text Generation Inference gRPC client #[derive(Debug, Clone)] pub struct Client { stub: TextGenerationServiceClient<Channel>, } impl Client { /// Returns a client connected to the given url #[allow(dead_code)] pub async fn connect(uri: Uri) -> Result<Self> { let channel = Channel::builder(uri).connect().await?; Ok(Self { stub: TextGenerationServiceClient::new(channel), }) } /// Returns a client connected to the given unix socket pub async fn connect_uds(path: String) -> Result<Self> { let channel = Channel::from_shared("http://[::]:50051".to_string()) .unwrap() .connect_with_connector(tower::service_fn(move |_: Uri| { tokio::net::UnixStream::connect(path.clone()) })) .await?; Ok(Self { stub: TextGenerationServiceClient::new(channel), }) } /// Returns a list of uris or unix sockets of all shards #[instrument(skip(self))] pub async fn service_discovery(&mut self) -> Result<Vec<String>> { let request = tonic::Request::new(ServiceDiscoveryRequest {}).inject_context(); let response = self.stub.service_discovery(request).await.map_err(|_| { ClientError::Connection("Server does not support v3 interface".to_string()) })?; let urls = response .into_inner() .urls .into_iter() // Remove unix socket prefix .map(|url| match url.strip_prefix("unix://") { None => url, Some(stripped_url) => stripped_url.to_string(), }) .collect(); Ok(urls) } /// Get model info #[instrument(skip(self))] pub async fn info(&mut self) -> Result<InfoResponse> { let request = tonic::Request::new(InfoRequest {}).inject_context(); let response = 
self.stub.info(request).await?.into_inner(); Ok(response) } /// Get model health #[instrument(skip(self))] pub async fn health(&mut self) -> Result<HealthResponse> { let request = tonic::Request::new(HealthRequest {}).inject_context(); let response = self.stub.health(request).await?.into_inner(); Ok(response) } /// Clear the past generations cache #[instrument(skip(self))] pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> { let request = tonic::Request::new(ClearCacheRequest { id: batch_id }).inject_context(); self.stub.clear_cache(request).await?; Ok(()) } /// Filter a cached batch #[instrument(skip(self))] pub async fn filter_batch( &mut self, batch_id: u64, request_ids: Vec<u64>, ) -> Result<Option<CachedBatch>> { let request = tonic::Request::new(FilterBatchRequest { batch_id, request_ids, }) .inject_context(); let filtered_batch = self.stub.filter_batch(request).await?.into_inner(); Ok(filtered_batch.batch) } /// Warmup on a max size batch /// /// Returns the maximum amount of tokens supported by the hardware #[instrument(skip_all)] pub async fn warmup( &mut self, max_input_tokens: Option<u32>, max_prefill_tokens: u32, max_total_tokens: Option<u32>, max_batch_size: Option<usize>, ) -> Result<(Option<u32>, u32, u32)> { let mut n_tokens = 0; let mut requests = Vec::new(); // Create requests while n_tokens < max_prefill_tokens { let mut truncate = max_prefill_tokens - n_tokens; if let Some(max_input_tokens) = max_input_tokens { truncate = min(max_input_tokens, truncate); } let mut input_chunks = Vec::new(); input_chunks.push(Chunk::Text("_test ".to_string().repeat(truncate as usize)).into()); if n_tokens == 0 { input_chunks.push( Chunk::Image(Image { // Safe unwrap, because we control the data. data: STANDARD.decode(WARMUP_IMAGE_BASE64).unwrap(), mimetype: "image/jpeg;base64".to_string(), }) .into(), ); } // Send stringly-typed inputs for compatibility for backends that haven't // been updated to support chunks. 
let mut inputs = String::new(); inputs.push_str(&"_test ".to_string().repeat(truncate as usize)); if n_tokens == 0 { // 1 request is enough to test vision heads. // Sending images on other queries messes up easily with truncation. inputs.push_str(&format!( "![](data:image/jpeg;base64,{WARMUP_IMAGE_BASE64})", )); } let max_new_tokens = if let Some(max_total_tokens) = max_total_tokens { max_total_tokens - truncate } else { 1 }; requests.push(Request { id: 0, inputs, add_special_tokens: true, input_chunks: Some(Input { chunks: input_chunks, }), // We truncate the input on the server side to be sure that it has the correct size truncate, // Blocks and slots will be set on the server side if we use paged attention blocks: vec![], slots: vec![], cache_len: 0, chunk_len: None, // Set sampling parameters to also take these ops into account in the max memory parameters: Some(NextTokenChooserParameters { temperature: 0.9, top_k: 10, top_p: 0.9, typical_p: 0.9, do_sample: false, seed: 0, repetition_penalty: 1.2, frequency_penalty: 0.1, watermark: true, grammar: String::new(), grammar_type: GrammarType::None as i32, }), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens, stop_sequences: vec![], ignore_eos_token: true, }), prefill_logprobs: true, top_n_tokens: 20, adapter_id: None, }); n_tokens += truncate; // Check max_batch_size if Some(requests.len()) == max_batch_size { break; } } let batch = Batch { id: 0, size: requests.len() as u32, requests, max_tokens: max_input_tokens.unwrap_or(0), max_blocks: 0, }; let request = tonic::Request::new(WarmupRequest { batch: Some(batch), max_input_tokens, max_prefill_tokens, max_total_tokens, }) .inject_context(); let response = self.stub.warmup(request).await?.into_inner(); Ok(( response.max_supported_total_tokens, response.max_input_tokens, response.max_total_tokens, )) } /// Generate one token for each request in the given batch /// /// Returns Generation for each request in batch /// and the next cached batch 
#[instrument(skip_all, fields(id = &batch.id, size = &batch.size))] pub async fn prefill( &mut self, batch: Batch, cached_batch: Option<CachedBatch>, ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> { let request = tonic::Request::new(PrefillRequest { batch: Some(batch), cached_batch, }) .inject_context(); let response = self.stub.prefill(request).await?.into_inner(); Ok(( response.generations, response.batch, PrefillTimings::new( response.concat_ns, response.forward_ns, response.decode_ns, response.total_ns, ), )) } /// Generate one token for each request in the given cached batches /// /// Returns Generation for each request in batches /// and the next cached batch #[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))] pub async fn decode( &mut self, batches: Vec<CachedBatch>, ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> { let request = tonic::Request::new(DecodeRequest { batches }).inject_context(); let response = self.stub.decode(request).await?.into_inner(); Ok(( response.generations, response.batch, DecodeTimings::new( response.concat_ns, response.forward_ns, response.decode_ns, response.total_ns, ), )) } } pub struct PrefillTimings { pub concat: Option<Duration>, pub forward: Duration, pub decode: Duration, pub total: Duration, } impl PrefillTimings { fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self { Self { concat: concat_ns.map(Duration::from_nanos), forward: Duration::from_nanos(forward_ns), decode: Duration::from_nanos(decode_ns), total: Duration::from_nanos(total_ns), } } } pub struct DecodeTimings { pub concat: Option<Duration>, pub forward: Duration, pub decode: Duration, pub total: Duration, } impl DecodeTimings { fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self { Self { concat: concat_ns.map(Duration::from_nanos), forward: Duration::from_nanos(forward_ns), decode: Duration::from_nanos(decode_ns), 
total: Duration::from_nanos(total_ns), } } }
text-generation-inference/backends/v3/src/client/grpc_client.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/client/grpc_client.rs", "repo_id": "text-generation-inference", "token_count": 5255 }
295
Documentation available at: https://huggingface.co/docs/text-generation-inference ## Release When making a release, please update the latest version in the documentation with: ``` export OLD_VERSION="2\.0\.3" export NEW_VERSION="2\.0\.4" find . -name '*.md' -exec sed -i -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \; ```
text-generation-inference/docs/README.md/0
{ "file_path": "text-generation-inference/docs/README.md", "repo_id": "text-generation-inference", "token_count": 107 }
296
# Using TGI CLI You can use TGI command-line interface (CLI) to download weights, serve and quantize models, or get information on serving parameters. To install the CLI, please refer to [the installation section](../installation#install-cli). `text-generation-server` lets you download the model with `download-weights` command like below ๐Ÿ‘‡ ```bash text-generation-server download-weights MODEL_HUB_ID ``` You can also use it to quantize models like below ๐Ÿ‘‡ ```bash text-generation-server quantize MODEL_HUB_ID OUTPUT_DIR ``` You can use `text-generation-launcher` to serve models. ```bash text-generation-launcher --model-id MODEL_HUB_ID --port 8080 ``` There are many options and parameters you can pass to `text-generation-launcher`. The documentation for CLI is kept minimal and intended to rely on self-generating documentation, which can be found by running ```bash text-generation-launcher --help ``` You can also find it hosted in this [Swagger UI](https://huggingface.github.io/text-generation-inference/). Same documentation can be found for `text-generation-server`. ```bash text-generation-server --help ```
text-generation-inference/docs/source/basic_tutorials/using_cli.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/using_cli.md", "repo_id": "text-generation-inference", "token_count": 323 }
297
# Using TGI with AMD GPUs TGI is supported and tested on [AMD Instinct MI210](https://www.amd.com/en/products/accelerators/instinct/mi200/mi210.html), [MI250](https://www.amd.com/en/products/accelerators/instinct/mi200/mi250.html) and [MI300](https://www.amd.com/en/products/accelerators/instinct/mi300.html) GPUs. The support may be extended in the future. The recommended usage is through Docker. Make sure to check the [AMD documentation](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html) on how to use Docker with AMD GPUs. On a server powered by AMD GPUs, TGI can be launched with the following command: ```bash model=teknium/OpenHermes-2.5-Mistral-7B volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \ --device=/dev/kfd --device=/dev/dri --group-add video \ --ipc=host --shm-size 256g --net host -v $volume:/data \ ghcr.io/huggingface/text-generation-inference:3.3.4-rocm \ --model-id $model ``` The launched TGI server can then be queried from clients, make sure to check out the [Consuming TGI](./basic_tutorials/consuming_tgi) guide. ## TunableOp TGI's docker image for AMD GPUs integrates [PyTorch's TunableOp](https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/cuda/tunable), which allows to do an additional warmup to select the best performing matrix multiplication (GEMM) kernel from rocBLAS or hipBLASLt. Experimentally, on MI300X, we noticed a 6-8% latency improvement when using TunableOp on top of ROCm 6.1 and PyTorch 2.3. TunableOp is enabled by default, the warmup may take 1-2 minutes. In case you would like to disable TunableOp, please pass `--env PYTORCH_TUNABLEOP_ENABLED="0"` when launching TGI's docker container. 
## Flash attention implementation Two implementations of Flash Attention are available for ROCm, the first is [ROCm/flash-attention](https://github.com/ROCm/flash-attention) based on a [Composable Kernel](https://github.com/ROCm/composable_kernel) (CK) implementation, and the second is a [Triton implementation](https://github.com/huggingface/text-generation-inference/blob/main/server/text_generation_server/layers/attention/flash_attn_triton.py). By default, the Composable Kernel implementation is used. However, the Triton implementation has slightly lower latency on MI250 and MI300, but requires a warmup which can be prohibitive as it needs to be done again for each new prompt length. If needed, FA Triton implementation can be enabled with `--env ROCM_USE_FLASH_ATTN_V2_TRITON="0"` when launching TGI's docker container. ## Custom PagedAttention For better performance on ROCm, a custom Paged Attention kernel is available and is enabled by default. To disable it and fall back to the PagedAttention v2 kernel, set the environment variable `ROCM_USE_CUSTOM_PAGED_ATTN=0`. The custom kernel supports bf16 and fp16 data types, block size of 16, head size of 128, a maximum context length of 16k, and GQA ratios between 1 and 16. For other configurations, we use the PagedAttention v2 kernel. ## Unsupported features The following features are currently not supported in the ROCm version of TGI, and the support may be extended in the future: * Loading [AWQ](https://huggingface.co/docs/transformers/quantization#awq) checkpoints. * Kernel for sliding window attention (Mistral)
text-generation-inference/docs/source/installation_amd.md/0
{ "file_path": "text-generation-inference/docs/source/installation_amd.md", "repo_id": "text-generation-inference", "token_count": 1047 }
298
import asyncio import contextlib import os import shlex import subprocess import sys import threading import time from tempfile import TemporaryDirectory from typing import List import socket import docker import pytest from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError from docker.errors import NotFound import logging from huggingface_hub import AsyncInferenceClient, TextGenerationOutput import huggingface_hub logging.basicConfig( level=logging.INFO, format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>", stream=sys.stdout, ) logger = logging.getLogger(__file__) # Use the latest image from the local docker build DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", "tgi-gaudi") DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", None) HF_TOKEN = huggingface_hub.get_token() assert ( HF_TOKEN is not None ), "HF_TOKEN is not set, please set it as some models are gated and thus the test will fail without it" if DOCKER_VOLUME is None: logger.warning( "DOCKER_VOLUME is not set, this will lead to the tests redownloading the models on each run, consider setting it to speed up testing" ) LOG_LEVEL = os.getenv("LOG_LEVEL", "info") BASE_ENV = { "HF_HUB_ENABLE_HF_TRANSFER": "1", "LOG_LEVEL": LOG_LEVEL, "HF_TOKEN": os.getenv("HF_TOKEN", None), } HABANA_RUN_ARGS = { "runtime": "habana", "ipc_mode": "host", "cap_add": ["sys_nice"], } def stream_container_logs(container, test_name): """Stream container logs in a separate thread.""" try: for log in container.logs(stream=True, follow=True): print( f"[TGI Server Logs - {test_name}] {log.decode('utf-8')}", end="", file=sys.stderr, flush=True, ) except Exception as e: logger.error(f"Error streaming container logs: {str(e)}") class TestClient(AsyncInferenceClient): def __init__(self, service_name: str, base_url: str): super().__init__(model=base_url) self.service_name = service_name class LauncherHandle: def 
__init__(self, service_name: str, port: int): self.client = TestClient(service_name, f"http://localhost:{port}") def _inner_health(self): raise NotImplementedError async def health(self, timeout: int = 60): assert timeout > 0 start_time = time.time() logger.info(f"Starting health check with timeout of {timeout}s") for attempt in range(timeout): if not self._inner_health(): logger.error("Launcher crashed during health check") raise RuntimeError("Launcher crashed") try: await self.client.text_generation("test", max_new_tokens=1) elapsed = time.time() - start_time logger.info(f"Health check passed after {elapsed:.1f}s") return except (ClientConnectorError, ClientOSError, ServerDisconnectedError) as e: if attempt == timeout - 1: logger.error(f"Health check failed after {timeout}s: {str(e)}") raise RuntimeError(f"Health check failed: {str(e)}") if attempt % 10 == 0 and attempt != 0: # Only log every 10th attempt logger.debug( f"Connection attempt {attempt}/{timeout} failed: {str(e)}" ) time.sleep(1) except Exception as e: logger.error(f"Unexpected error during health check: {str(e)}") # Get full traceback for debugging import traceback logger.error(f"Full traceback:\n{traceback.format_exc()}") raise class ContainerLauncherHandle(LauncherHandle): def __init__(self, docker_client, container_name, port: int): service_name = container_name # Use container name as service name super(ContainerLauncherHandle, self).__init__(service_name, port) self.docker_client = docker_client self.container_name = container_name def _inner_health(self) -> bool: try: container = self.docker_client.containers.get(self.container_name) status = container.status if status not in ["running", "created"]: logger.warning(f"Container status is {status}") # Get container logs for debugging logs = container.logs().decode("utf-8") logger.debug(f"Container logs:\n{logs}") return status in ["running", "created"] except Exception as e: logger.error(f"Error checking container health: {str(e)}") return False 
class ProcessLauncherHandle(LauncherHandle): def __init__(self, process, port: int): service_name = "process" # Use generic name for process launcher super(ProcessLauncherHandle, self).__init__(service_name, port) self.process = process def _inner_health(self) -> bool: return self.process.poll() is None @pytest.fixture(scope="module") def data_volume(): tmpdir = TemporaryDirectory() yield tmpdir.name try: # Cleanup the temporary directory using sudo as it contains root files created by the container subprocess.run(shlex.split(f"sudo rm -rf {tmpdir.name}"), check=True) except subprocess.CalledProcessError as e: logger.error(f"Error cleaning up temporary directory: {str(e)}") @pytest.fixture(scope="module") def gaudi_launcher(): @contextlib.contextmanager def docker_launcher( model_id: str, test_name: str, tgi_args: List[str] = None, env_config: dict = None, ): logger.info( f"Starting docker launcher for model {model_id} and test {test_name}" ) # Get a random available port def get_free_port(): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(("", 0)) s.listen(1) port = s.getsockname()[1] return port port = get_free_port() logger.debug(f"Using port {port}") client = docker.from_env() container_name = f"tgi-gaudi-test-{test_name.replace('/', '-')}" try: container = client.containers.get(container_name) logger.info( f"Stopping existing container {container_name} for test {test_name}" ) container.stop() container.wait() container.remove() logger.info(f"Removed existing container {container_name}") except NotFound: pass except Exception as e: logger.error(f"Error handling existing container: {str(e)}") if tgi_args is None: tgi_args = [] else: tgi_args = tgi_args.copy() env = BASE_ENV.copy() # Add model_id to env env["MODEL_ID"] = model_id # Add env config that is defined in the fixture parameter if env_config is not None: env.update(env_config.copy()) volumes = [] if DOCKER_VOLUME: volumes = [f"{DOCKER_VOLUME}:/data"] logger.debug(f"Using volume 
{volumes}") try: logger.debug(f"Using command {tgi_args}") logger.info(f"Creating container with name {container_name}") logger.debug(f"Using environment {env}") logger.debug(f"Using volumes {volumes}") logger.debug(f"HABANA_RUN_ARGS {HABANA_RUN_ARGS}") # Log equivalent docker run command for debugging, this is not actually executed container = client.containers.run( DOCKER_IMAGE, command=tgi_args, name=container_name, environment=env, detach=True, volumes=volumes, ports={"80/tcp": port}, **HABANA_RUN_ARGS, ) logger.info(f"Container {container_name} started successfully") # Start log streaming in a background thread log_thread = threading.Thread( target=stream_container_logs, args=(container, test_name), daemon=True, # This ensures the thread will be killed when the main program exits ) log_thread.start() # Add a small delay to allow container to initialize time.sleep(2) # Check container status after creation status = container.status logger.debug(f"Initial container status: {status}") if status not in ["running", "created"]: logs = container.logs().decode("utf-8") logger.error(f"Container failed to start properly. 
Logs:\n{logs}") yield ContainerLauncherHandle(client, container.name, port) except Exception as e: logger.error(f"Error starting container: {str(e)}") # Get full traceback for debugging import traceback logger.error(f"Full traceback:\n{traceback.format_exc()}") raise finally: try: container = client.containers.get(container_name) logger.info(f"Stopping container {container_name}") container.stop() container.wait() container_output = container.logs().decode("utf-8") print(container_output, file=sys.stderr) container.remove() logger.info(f"Container {container_name} removed successfully") except NotFound: pass except Exception as e: logger.warning(f"Error cleaning up container: {str(e)}") return docker_launcher @pytest.fixture(scope="module") def gaudi_generate_load(): async def generate_load_inner( client: AsyncInferenceClient, prompt: str, max_new_tokens: int, n: int ) -> List[TextGenerationOutput]: try: futures = [ client.text_generation( prompt, max_new_tokens=max_new_tokens, details=True, decoder_input_details=True, ) for _ in range(n) ] return await asyncio.gather(*futures) except Exception as e: logger.error(f"Error generating load: {str(e)}") raise return generate_load_inner
text-generation-inference/integration-tests/fixtures/gaudi/service.py/0
{ "file_path": "text-generation-inference/integration-tests/fixtures/gaudi/service.py", "repo_id": "text-generation-inference", "token_count": 4842 }
299
[ { "choices": [ { "delta": { "content": "OK", "function_call": null, "refusal": null, "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741265134, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "service_tier": null, "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "!", "function_call": null, "refusal": null, "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741265134, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "service_tier": null, "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "", "function_call": null, "refusal": null, "role": "assistant", "tool_calls": null }, "finish_reason": "stop", "index": 0, "logprobs": null } ], "created": 1741265134, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "service_tier": null, "system_fingerprint": "3.1.2-dev0-native", "usage": null } ]
text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_chat_openai_nousage.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_chat_openai_nousage.json", "repo_id": "text-generation-inference", "token_count": 861 }
300
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 836, "logprob": -1.265625, "special": false, "text": " i" }, { "id": 18, "logprob": -0.119628906, "special": false, "text": "'" }, { "id": 298, "logprob": -2.265625, "special": false, "text": "ve" }, { "id": 650, "logprob": -0.49804688, "special": false, "text": " been" }, { "id": 1241, "logprob": 0.0, "special": false, "text": " using" }, { "id": 334, "logprob": 0.0, "special": false, "text": " it" }, { "id": 312, "logprob": -1.2421875, "special": false, "text": " for" }, { "id": 909, "logprob": -0.99609375, "special": false, "text": " years" }, { "id": 193, "logprob": -0.30273438, "special": false, "text": "\n" }, { "id": 807, "logprob": -1.078125, "special": false, "text": "ik" } ] }, "generated_text": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron: i've been using it for years\nik" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json", "repo_id": "text-generation-inference", "token_count": 905 }
301
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1845703, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.5727539, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.00010967255, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.04510498, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.00020992756, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.0046539307, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025844574, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", 
"generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -1.1826172, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.56689453, "special": false, "text": "De" }, { "id": 1022, "logprob": -0.000108003616, "special": false, "text": "ep" }, { "id": 6509, "logprob": -0.1239624, "special": false, "text": " learning" }, { "id": 338, "logprob": -0.044433594, "special": false, "text": " is" }, { "id": 263, "logprob": -0.018295288, "special": false, "text": " a" }, { "id": 11306, "logprob": -0.45922852, "special": false, "text": " subset" }, { "id": 310, "logprob": -0.0002104044, "special": false, "text": " of" }, { "id": 4933, "logprob": -0.004711151, "special": false, "text": " machine" }, { "id": 6509, "logprob": -0.00025892258, "special": false, "text": " learning" } ] }, "generated_text": "\nDeep learning is a subset of machine learning" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json", "repo_id": "text-generation-inference", "token_count": 4006 }
302