| text (string, 7–1.24M chars) | id (string, 14–166 chars) | metadata (dict) | __index_level_0__ (int64, 0–519) |
|---|---|---|---|
import hydra
from omegaconf import DictConfig
def make_robot(cfg: DictConfig):
robot = hydra.utils.instantiate(cfg)
return robot
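# Example usage (an illustrative sketch mirroring tests/conftest.py below; the
# config path is a placeholder for a robot config shipped with the repo):
#   from lerobot.common.utils.utils import init_hydra_config
#   robot_cfg = init_hydra_config("lerobot/configs/robot/koch.yaml")
#   robot = make_robot(robot_cfg)
#   robot.connect()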
|
lerobot/lerobot/common/robot_devices/robots/factory.py/0
|
{
"file_path": "lerobot/lerobot/common/robot_devices/robots/factory.py",
"repo_id": "lerobot",
"token_count": 49
}
| 168
|
# @package _global_
# Use `act_real.yaml` to train on real-world Aloha/Aloha2 datasets.
# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, cam_high,
# cam_low) instead of 1 camera (i.e. top). Also, `training.eval_freq` is set to -1. This option controls
# how often (in training steps) checkpoints are evaluated; setting it to -1 deactivates evaluation.
# This is because real-world evaluation is done through [dora-lerobot](https://github.com/dora-rs/dora-lerobot).
# Look at its README for more information on how to evaluate a checkpoint in the real world.
#
# Example of usage for training:
# ```bash
# python lerobot/scripts/train.py \
# policy=act_real \
# env=dora_aloha_real
# ```
seed: 1000
dataset_repo_id: lerobot/aloha_static_vinh_cup
override_dataset_stats:
observation.images.cam_right_wrist:
# stats from imagenet, since we use a pretrained vision model
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
observation.images.cam_left_wrist:
# stats from imagenet, since we use a pretrained vision model
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
observation.images.cam_high:
# stats from imagenet, since we use a pretrained vision model
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
observation.images.cam_low:
# stats from imagenet, since we use a pretrained vision model
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
training:
offline_steps: 100000
online_steps: 0
eval_freq: -1
save_freq: 20000
save_checkpoint: true
batch_size: 8
lr: 1e-5
lr_backbone: 1e-5
weight_decay: 1e-4
grad_clip_norm: 10
online_steps_between_rollouts: 1
delta_timestamps:
action: "[i / ${fps} for i in range(${policy.chunk_size})]"
eval:
n_episodes: 50
batch_size: 50
# See `configuration_act.py` for more details.
policy:
name: act
# Input / output structure.
n_obs_steps: 1
chunk_size: 100 # chunk_size
n_action_steps: 100
input_shapes:
# TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
observation.images.cam_right_wrist: [3, 480, 640]
observation.images.cam_left_wrist: [3, 480, 640]
observation.images.cam_high: [3, 480, 640]
observation.images.cam_low: [3, 480, 640]
observation.state: ["${env.state_dim}"]
output_shapes:
action: ["${env.action_dim}"]
# Normalization / Unnormalization
input_normalization_modes:
observation.images.cam_right_wrist: mean_std
observation.images.cam_left_wrist: mean_std
observation.images.cam_high: mean_std
observation.images.cam_low: mean_std
observation.state: mean_std
output_normalization_modes:
action: mean_std
# Architecture.
# Vision backbone.
vision_backbone: resnet18
pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
replace_final_stride_with_dilation: false
# Transformer layers.
pre_norm: false
dim_model: 512
n_heads: 8
dim_feedforward: 3200
feedforward_activation: relu
n_encoder_layers: 4
# Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
# that means only the first layer is used. Here we match the original implementation by setting this to 1.
# See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
n_decoder_layers: 1
# VAE.
use_vae: true
latent_dim: 32
n_vae_encoder_layers: 4
# Inference.
temporal_ensemble_coeff: null
# Training and loss computation.
dropout: 0.1
kl_weight: 10.0
|
lerobot/lerobot/configs/policy/act_real.yaml/0
|
{
"file_path": "lerobot/lerobot/configs/policy/act_real.yaml",
"repo_id": "lerobot",
"token_count": 1466
}
| 169
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Visualize effects of image transforms for a given configuration.
This script will generate examples of transformed images as they are output by LeRobot dataset.
Additionally, each individual transform can be visualized separately as well as examples of combined transforms
--- Usage Examples ---
Increase hue jitter
```
python lerobot/scripts/visualize_image_transforms.py \
dataset_repo_id=lerobot/aloha_mobile_shrimp \
training.image_transforms.hue.min_max="[-0.25,0.25]"
```
Increase brightness & brightness weight
```
python lerobot/scripts/visualize_image_transforms.py \
dataset_repo_id=lerobot/aloha_mobile_shrimp \
training.image_transforms.brightness.weight=10.0 \
training.image_transforms.brightness.min_max="[1.0,2.0]"
```
Blur images and disable saturation & hue
```
python lerobot/scripts/visualize_image_transforms.py \
dataset_repo_id=lerobot/aloha_mobile_shrimp \
training.image_transforms.sharpness.weight=10.0 \
training.image_transforms.sharpness.min_max="[0.0,1.0]" \
training.image_transforms.saturation.weight=0.0 \
training.image_transforms.hue.weight=0.0
```
Use all transforms with random order
```
python lerobot/scripts/visualize_image_transforms.py \
dataset_repo_id=lerobot/aloha_mobile_shrimp \
training.image_transforms.max_num_transforms=5 \
training.image_transforms.random_order=true
```
"""
from pathlib import Path
import hydra
from torchvision.transforms import ToPILImage
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.transforms import get_image_transforms
OUTPUT_DIR = Path("outputs/image_transforms")
to_pil = ToPILImage()
def save_config_all_transforms(cfg, original_frame, output_dir, n_examples):
tf = get_image_transforms(
brightness_weight=cfg.brightness.weight,
brightness_min_max=cfg.brightness.min_max,
contrast_weight=cfg.contrast.weight,
contrast_min_max=cfg.contrast.min_max,
saturation_weight=cfg.saturation.weight,
saturation_min_max=cfg.saturation.min_max,
hue_weight=cfg.hue.weight,
hue_min_max=cfg.hue.min_max,
sharpness_weight=cfg.sharpness.weight,
sharpness_min_max=cfg.sharpness.min_max,
max_num_transforms=cfg.max_num_transforms,
random_order=cfg.random_order,
)
output_dir_all = output_dir / "all"
output_dir_all.mkdir(parents=True, exist_ok=True)
for i in range(1, n_examples + 1):
transformed_frame = tf(original_frame)
to_pil(transformed_frame).save(output_dir_all / f"{i}.png", quality=100)
print("Combined transforms examples saved to:")
print(f" {output_dir_all}")
def save_config_single_transforms(cfg, original_frame, output_dir, n_examples):
transforms = [
"brightness",
"contrast",
"saturation",
"hue",
"sharpness",
]
print("Individual transforms examples saved to:")
for transform in transforms:
# Apply one transformation with random value in min_max range
kwargs = {
f"{transform}_weight": cfg[f"{transform}"].weight,
f"{transform}_min_max": cfg[f"{transform}"].min_max,
}
tf = get_image_transforms(**kwargs)
output_dir_single = output_dir / f"{transform}"
output_dir_single.mkdir(parents=True, exist_ok=True)
for i in range(1, n_examples + 1):
transformed_frame = tf(original_frame)
to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100)
# Apply min transformation
min_value, max_value = cfg[f"{transform}"].min_max
kwargs = {
f"{transform}_weight": cfg[f"{transform}"].weight,
f"{transform}_min_max": (min_value, min_value),
}
tf = get_image_transforms(**kwargs)
transformed_frame = tf(original_frame)
to_pil(transformed_frame).save(output_dir_single / "min.png", quality=100)
# Apply max transformation
kwargs = {
f"{transform}_weight": cfg[f"{transform}"].weight,
f"{transform}_min_max": (max_value, max_value),
}
tf = get_image_transforms(**kwargs)
transformed_frame = tf(original_frame)
to_pil(transformed_frame).save(output_dir_single / "max.png", quality=100)
# Apply mean transformation
mean_value = (min_value + max_value) / 2
kwargs = {
f"{transform}_weight": cfg[f"{transform}"].weight,
f"{transform}_min_max": (mean_value, mean_value),
}
tf = get_image_transforms(**kwargs)
transformed_frame = tf(original_frame)
to_pil(transformed_frame).save(output_dir_single / "mean.png", quality=100)
print(f" {output_dir_single}")
def visualize_transforms(cfg, output_dir: Path, n_examples: int = 5):
dataset = LeRobotDataset(cfg.dataset_repo_id)
output_dir = output_dir / cfg.dataset_repo_id.split("/")[-1]
output_dir.mkdir(parents=True, exist_ok=True)
# Get 1st frame from 1st camera of 1st episode
original_frame = dataset[0][dataset.camera_keys[0]]
to_pil(original_frame).save(output_dir / "original_frame.png", quality=100)
print("\nOriginal frame saved to:")
print(f" {output_dir / 'original_frame.png'}.")
save_config_all_transforms(cfg.training.image_transforms, original_frame, output_dir, n_examples)
save_config_single_transforms(cfg.training.image_transforms, original_frame, output_dir, n_examples)
@hydra.main(version_base="1.2", config_name="default", config_path="../configs")
def visualize_transforms_cli(cfg):
visualize_transforms(cfg, output_dir=OUTPUT_DIR)
if __name__ == "__main__":
visualize_transforms_cli()
|
lerobot/lerobot/scripts/visualize_image_transforms.py/0
|
{
"file_path": "lerobot/lerobot/scripts/visualize_image_transforms.py",
"repo_id": "lerobot",
"token_count": 2530
}
| 170
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from lerobot.common.utils.utils import init_hydra_config
from .utils import DEVICE, KOCH_ROBOT_CONFIG_PATH
def pytest_collection_finish():
print(f"\nTesting with {DEVICE=}")
@pytest.fixture(scope="session")
def is_koch_available():
try:
from lerobot.common.robot_devices.robots.factory import make_robot
robot_cfg = init_hydra_config(KOCH_ROBOT_CONFIG_PATH)
robot = make_robot(robot_cfg)
robot.connect()
del robot
return True
except Exception as e:
print("A koch robot is not available.")
print(e)
return False
|
lerobot/tests/conftest.py/0
|
{
"file_path": "lerobot/tests/conftest.py",
"repo_id": "lerobot",
"token_count": 432
}
| 171
|
version https://git-lfs.github.com/spec/v1
oid sha256:7ffb173891cebb47a4d24d051f5fdd2ec44493d0a1a48d11f4d1410049aadd5b
size 4344
|
lerobot/tests/data/lerobot/aloha_mobile_wipe_wine/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_mobile_wipe_wine/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 69
}
| 172
|
version https://git-lfs.github.com/spec/v1
oid sha256:f98bd8f6347590aecdddaceed95d921f2d9f7bf35fbe742c37bdf12cba11dca6
size 2904
|
lerobot/tests/data/lerobot/aloha_sim_insertion_human_image/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_human_image/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 69
}
| 173
|
version https://git-lfs.github.com/spec/v1
oid sha256:752660d8fd884b33b7302a4a42ec7c680de2a3e5022d7d007586f4c6337ce08a
size 247
|
lerobot/tests/data/lerobot/aloha_sim_insertion_scripted_image/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_scripted_image/train/state.json",
"repo_id": "lerobot",
"token_count": 67
}
| 174
|
version https://git-lfs.github.com/spec/v1
oid sha256:722e0ba30e6f4ddce2d31166bfca753bf3c45c507a0f911e1635f0ec2521569e
size 247
|
lerobot/tests/data/lerobot/aloha_static_battery/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_battery/train/state.json",
"repo_id": "lerobot",
"token_count": 65
}
| 175
|
version https://git-lfs.github.com/spec/v1
oid sha256:bac9e519935d89880a9f3733a1ec7a2d0e5b3658c64ca500c1f45606d5237288
size 274824
|
lerobot/tests/data/lerobot/aloha_static_fork_pick_up/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_fork_pick_up/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 70
}
| 176
|
version https://git-lfs.github.com/spec/v1
oid sha256:823715ff128eb4034337d2bc5b2323b0ffc5f364ff301837e621fa74a4b7fd69
size 229608
|
lerobot/tests/data/lerobot/aloha_static_vinh_cup_left/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_vinh_cup_left/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 65
}
| 177
|
version https://git-lfs.github.com/spec/v1
oid sha256:50b3c026da835560f9b87e7dfd28673e766bfb58d56c85002687d0a599b6fa43
size 3304
|
lerobot/tests/data/lerobot/pusht_keypoints/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/pusht_keypoints/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 67
}
| 178
|
version https://git-lfs.github.com/spec/v1
oid sha256:7b39a1f8b0b7fe33a136ba39ae861503f55abbe90f7e0340a2efb824c9eefd12
size 247
|
lerobot/tests/data/lerobot/unitreeh1_fold_clothes/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/unitreeh1_fold_clothes/train/state.json",
"repo_id": "lerobot",
"token_count": 69
}
| 179
|
version https://git-lfs.github.com/spec/v1
oid sha256:1d1eef601c6c29ba5a9797b20fd332f5acc1b0a885434aacf90822450a1203ba
size 247
|
lerobot/tests/data/lerobot/unitreeh1_two_robot_greeting/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/unitreeh1_two_robot_greeting/train/state.json",
"repo_id": "lerobot",
"token_count": 66
}
| 180
|
version https://git-lfs.github.com/spec/v1
oid sha256:9927ec508e3335f8b10cf3682e41dedb7e647f92a2063a4196f1e48749c47bc5
size 85353
|
lerobot/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_24.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_24.safetensors",
"repo_id": "lerobot",
"token_count": 65
}
| 181
|
version https://git-lfs.github.com/spec/v1
oid sha256:28738b3cfad17af0ac5181effdd796acdf7953cd5bcca3f421a11ddfd6b0076f
size 30800
|
lerobot/tests/data/save_policy_to_safetensors/dora_aloha_real_act_real_no_state/grad_stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_policy_to_safetensors/dora_aloha_real_act_real_no_state/grad_stats.safetensors",
"repo_id": "lerobot",
"token_count": 62
}
| 182
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from safetensors.torch import save_file
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.transforms import get_image_transforms
from lerobot.common.utils.utils import init_hydra_config, seeded_context
from tests.test_image_transforms import ARTIFACT_DIR, DATASET_REPO_ID
from tests.utils import DEFAULT_CONFIG_PATH
def save_default_config_transform(original_frame: torch.Tensor, output_dir: Path):
cfg = init_hydra_config(DEFAULT_CONFIG_PATH)
cfg_tf = cfg.training.image_transforms
default_tf = get_image_transforms(
brightness_weight=cfg_tf.brightness.weight,
brightness_min_max=cfg_tf.brightness.min_max,
contrast_weight=cfg_tf.contrast.weight,
contrast_min_max=cfg_tf.contrast.min_max,
saturation_weight=cfg_tf.saturation.weight,
saturation_min_max=cfg_tf.saturation.min_max,
hue_weight=cfg_tf.hue.weight,
hue_min_max=cfg_tf.hue.min_max,
sharpness_weight=cfg_tf.sharpness.weight,
sharpness_min_max=cfg_tf.sharpness.min_max,
max_num_transforms=cfg_tf.max_num_transforms,
random_order=cfg_tf.random_order,
)
with seeded_context(1337):
img_tf = default_tf(original_frame)
save_file({"default": img_tf}, output_dir / "default_transforms.safetensors")
def save_single_transforms(original_frame: torch.Tensor, output_dir: Path):
transforms = {
"brightness": [(0.5, 0.5), (2.0, 2.0)],
"contrast": [(0.5, 0.5), (2.0, 2.0)],
"saturation": [(0.5, 0.5), (2.0, 2.0)],
"hue": [(-0.25, -0.25), (0.25, 0.25)],
"sharpness": [(0.5, 0.5), (2.0, 2.0)],
}
frames = {"original_frame": original_frame}
for transform, values in transforms.items():
for min_max in values:
kwargs = {
f"{transform}_weight": 1.0,
f"{transform}_min_max": min_max,
}
tf = get_image_transforms(**kwargs)
key = f"{transform}_{min_max[0]}_{min_max[1]}"
frames[key] = tf(original_frame)
save_file(frames, output_dir / "single_transforms.safetensors")
def main():
dataset = LeRobotDataset(DATASET_REPO_ID, image_transforms=None)
output_dir = Path(ARTIFACT_DIR)
output_dir.mkdir(parents=True, exist_ok=True)
original_frame = dataset[0][dataset.camera_keys[0]]
save_single_transforms(original_frame, output_dir)
save_default_config_transform(original_frame, output_dir)
if __name__ == "__main__":
main()
|
lerobot/tests/scripts/save_image_transforms_to_safetensors.py/0
|
{
"file_path": "lerobot/tests/scripts/save_image_transforms_to_safetensors.py",
"repo_id": "lerobot",
"token_count": 1312
}
| 183
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import pytest
from lerobot.scripts.visualize_dataset import visualize_dataset
@pytest.mark.parametrize(
"repo_id",
["lerobot/pusht"],
)
@pytest.mark.parametrize("root", [Path(__file__).parent / "data"])
def test_visualize_local_dataset(tmpdir, repo_id, root):
rrd_path = visualize_dataset(
repo_id,
episode_index=0,
batch_size=32,
save=True,
output_dir=tmpdir,
root=root,
)
assert rrd_path.exists()
|
lerobot/tests/test_visualize_dataset.py/0
|
{
"file_path": "lerobot/tests/test_visualize_dataset.py",
"repo_id": "lerobot",
"token_count": 399
}
| 184
|
check_dirs := .
quality:
black --check $(check_dirs)
ruff $(check_dirs)
style:
black $(check_dirs)
ruff $(check_dirs) --fix
|
parler-tts/Makefile/0
|
{
"file_path": "parler-tts/Makefile",
"repo_id": "parler-tts",
"token_count": 55
}
| 185
|
from transformers import PretrainedConfig
class DACConfig(PretrainedConfig):
model_type = "dac"
def __init__(
self,
num_codebooks: int = 9,
model_bitrate: int = 8, # kbps
codebook_size: int = 1024,
latent_dim: int = 1024,
frame_rate: int = 86,
sampling_rate: int = 44100,
**kwargs,
):
self.codebook_size = codebook_size
self.model_bitrate = model_bitrate
self.latent_dim = latent_dim
self.num_codebooks = num_codebooks
self.frame_rate = frame_rate
self.sampling_rate = sampling_rate
super().__init__(**kwargs)
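# Illustrative usage (an added sketch, not part of the original file):
#   config = DACConfig(num_codebooks=9, sampling_rate=44100)
#   config.model_type          # -> "dac"
#   config.to_json_string()    # standard PretrainedConfig serialization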
|
parler-tts/parler_tts/dac_wrapper/configuration_dac.py/0
|
{
"file_path": "parler-tts/parler_tts/dac_wrapper/configuration_dac.py",
"repo_id": "parler-tts",
"token_count": 300
}
| 186
|
# Builds GPU docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.8
# Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN apt-get update && \
apt-get install -y curl git wget software-properties-common git-lfs && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Install audio-related libraries
RUN apt-get update && \
apt install -y ffmpeg
RUN apt install -y libsndfile1-dev
RUN git lfs install
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN conda create --name peft python=${PYTHON_VERSION} ipython jupyter pip
RUN python3 -m pip install --no-cache-dir --upgrade pip
# Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/peft/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Stage 2
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
RUN source activate peft && \
python3 -m pip install --no-cache-dir bitsandbytes optimum auto-gptq
# Add autoawq for quantization testing
RUN source activate peft && \
python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.4/autoawq-0.2.4-cp38-cp38-linux_x86_64.whl
RUN source activate peft && \
python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ_kernels/releases/download/v0.0.6/autoawq_kernels-0.0.6-cp38-cp38-linux_x86_64.whl
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Add eetq for quantization testing
RUN source activate peft && \
python3 -m pip install git+https://github.com/NetEase-FuXi/EETQ.git
# Activate the conda env and install transformers + accelerate from source
RUN source activate peft && \
python3 -m pip install -U --no-cache-dir \
librosa \
"soundfile>=0.12.1" \
scipy \
git+https://github.com/huggingface/transformers \
git+https://github.com/huggingface/accelerate \
peft[test]@git+https://github.com/huggingface/peft
# Add aqlm for quantization testing
RUN source activate peft && \
pip install aqlm[gpu]>=1.0.2
# Add HQQ for quantization testing
RUN source activate peft && \
pip install hqq
RUN source activate peft && \
pip freeze | grep transformers
RUN echo "source activate peft" >> ~/.profile
# Activate the virtualenv
CMD ["/bin/bash"]
|
peft/docker/peft-gpu/Dockerfile/0
|
{
"file_path": "peft/docker/peft-gpu/Dockerfile",
"repo_id": "peft",
"token_count": 1085
}
| 187
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Mixed adapter types
Normally, it isn't possible to mix different adapter types in 🤗 PEFT. You can create a PEFT model with two different LoRA adapters (which can have different config options), but it is not possible to combine a LoRA and LoHa adapter. With [`PeftMixedModel`] however, this works as long as the adapter types are compatible. The main purpose of allowing mixed adapter types is to combine trained adapters for inference. While it is possible to train a mixed adapter model, this has not been tested and is not recommended.
To load different adapter types into a PEFT model, use [`PeftMixedModel`] instead of [`PeftModel`]:
```py
from peft import PeftMixedModel
base_model = ... # load the base model, e.g. from transformers
# load first adapter, which will be called "default"
peft_model = PeftMixedModel.from_pretrained(base_model, <path_to_adapter1>)
peft_model.load_adapter(<path_to_adapter2>, adapter_name="other")
peft_model.set_adapter(["default", "other"])
```
The [`~PeftMixedModel.set_adapter`] method is necessary to activate both adapters, otherwise only the first adapter would be active. You can keep adding more adapters by calling [`~PeftModel.add_adapter`] repeatedly.
[`PeftMixedModel`] does not support saving and loading mixed adapters. The adapters should already be trained, and loading the model requires a script to be run each time.
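For example, a third trained adapter could be loaded and activated alongside the first two (a sketch; the path is a placeholder):
```py
peft_model.load_adapter(<path_to_adapter3>, adapter_name="third")
peft_model.set_adapter(["default", "other", "third"])
```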
## Tips
- Not all adapter types can be combined. See [`peft.tuners.mixed.COMPATIBLE_TUNER_TYPES`](https://github.com/huggingface/peft/blob/1c1c7fdaa6e6abaa53939b865dee1eded82ad032/src/peft/tuners/mixed/model.py#L35) for a list of compatible types. An error will be raised if you try to combine incompatible adapter types.
- It is possible to mix multiple adapters of the same type, which can be useful for combining adapters with very different configs.
- If you want to combine a lot of different adapters, the most performant way to do it is to consecutively add the same adapter types. For example, add LoRA1, LoRA2, LoHa1, LoHa2 in this order, instead of LoRA1, LoHa1, LoRA2, and LoHa2. While the order can affect the output, there is no inherently *best* order, so it is best to choose the fastest one.
|
peft/docs/source/developer_guides/mixed_models.md/0
|
{
"file_path": "peft/docs/source/developer_guides/mixed_models.md",
"repo_id": "peft",
"token_count": 770
}
| 188
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quicktour
PEFT offers parameter-efficient methods for finetuning large pretrained models. The traditional paradigm is to finetune all of a model's parameters for each downstream task, but this is becoming exceedingly costly and impractical because of the enormous number of parameters in models today. Instead, it is more efficient to train a smaller number of prompt parameters or use a reparametrization method like low-rank adaptation (LoRA) to reduce the number of trainable parameters.
This quicktour will show you PEFT's main features and how you can train or run inference on large models that would typically be inaccessible on consumer devices.
## Train
Each PEFT method is defined by a [`PeftConfig`] class that stores all the important parameters for building a [`PeftModel`]. For example, to train with LoRA, load and create a [`LoraConfig`] class and specify the following parameters:
- `task_type`: the task to train for (sequence-to-sequence language modeling in this case)
- `inference_mode`: whether you're using the model for inference or not
- `r`: the dimension of the low-rank matrices
- `lora_alpha`: the scaling factor for the low-rank matrices
- `lora_dropout`: the dropout probability of the LoRA layers
```python
from peft import LoraConfig, TaskType
peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
```
<Tip>
See the [`LoraConfig`] reference for more details about other parameters you can adjust, such as the modules to target or the bias type.
</Tip>
Once the [`LoraConfig`] is set up, create a [`PeftModel`] with the [`get_peft_model`] function. It takes a base model - which you can load from the Transformers library - and the [`LoraConfig`] containing the parameters for how to configure a model for training with LoRA.
Load the base model you want to finetune.
```python
from transformers import AutoModelForSeq2SeqLM
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
```
Wrap the base model and `peft_config` with the [`get_peft_model`] function to create a [`PeftModel`]. To get a sense of the number of trainable parameters in your model, use the [`print_trainable_parameters`] method.
```python
from peft import get_peft_model
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
"output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282"
```
Out of [bigscience/mt0-large's](https://huggingface.co/bigscience/mt0-large) 1.2B parameters, you're only training 0.19% of them!
That is it 🎉! Now you can train the model with the Transformers [`~transformers.Trainer`], Accelerate, or any custom PyTorch training loop.
For example, to train with the [`~transformers.Trainer`] class, set up a [`~transformers.TrainingArguments`] class with some training hyperparameters.
```py
from transformers import TrainingArguments, Trainer
training_args = TrainingArguments(
output_dir="your-name/bigscience/mt0-large-lora",
learning_rate=1e-3,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
num_train_epochs=2,
weight_decay=0.01,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
)
```
Pass the model, training arguments, dataset, tokenizer, and any other necessary component to the [`~transformers.Trainer`], and call [`~transformers.Trainer.train`] to start training.
```py
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
trainer.train()
```
### Save model
After your model is finished training, you can save your model to a directory using the [`~transformers.PreTrainedModel.save_pretrained`] function.
```py
model.save_pretrained("output_dir")
```
You can also save your model to the Hub (make sure you're logged in to your Hugging Face account first) with the [`~transformers.PreTrainedModel.push_to_hub`] function.
```python
from huggingface_hub import notebook_login
notebook_login()
model.push_to_hub("your-name/bigscience/mt0-large-lora")
```
Both methods only save the extra PEFT weights that were trained, meaning it is super efficient to store, transfer, and load. For example, this [facebook/opt-350m](https://huggingface.co/ybelkada/opt-350m-lora) model trained with LoRA only contains two files: `adapter_config.json` and `adapter_model.safetensors`. The `adapter_model.safetensors` file is just 6.3MB!
<div class="flex flex-col justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/>
<figcaption class="text-center">The adapter weights for a opt-350m model stored on the Hub are only ~6MB compared to the full size of the model weights, which can be ~700MB.</figcaption>
</div>
## Inference
<Tip>
Take a look at the [AutoPeftModel](package_reference/auto_class) API reference for a complete list of available `AutoPeftModel` classes.
</Tip>
Easily load any PEFT-trained model for inference with the [`AutoPeftModel`] class and the [`~transformers.PreTrainedModel.from_pretrained`] method:
```py
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer
import torch
model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = model.to("cuda")
model.eval()
inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt")
outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])
"Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla."
```
For other tasks that aren't explicitly supported with an `AutoPeftModelFor` class - such as automatic speech recognition - you can still use the base [`AutoPeftModel`] class to load a model for the task.
```py
from peft import AutoPeftModel
model = AutoPeftModel.from_pretrained("smangrul/openai-whisper-large-v2-LORA-colab")
```
## Next steps
Now that you've seen how to train a model with one of the PEFT methods, we encourage you to try out some of the other methods like prompt tuning. The steps are very similar to the ones shown in the quicktour:
1. prepare a [`PeftConfig`] for a PEFT method
2. use the [`get_peft_model`] method to create a [`PeftModel`] from the configuration and base model
Then you can train it however you like! To load a PEFT model for inference, you can use the [`AutoPeftModel`] class.
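A minimal sketch of those two steps with prompt tuning (the base model and hyperparameters here are illustrative):
```py
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, TaskType, get_peft_model
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
peft_config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=20)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```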
Feel free to also take a look at the task guides if you're interested in training a model with another PEFT method for a specific task such as semantic segmentation, multilingual automatic speech recognition, DreamBooth, token classification, and more.
|
peft/docs/source/quicktour.md/0
|
{
"file_path": "peft/docs/source/quicktour.md",
"repo_id": "peft",
"token_count": 2384
}
| 189
|
import argparse
import os
from typing import Optional
from huggingface_hub import HfFolder, whoami
from transformers import PretrainedConfig
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesModelWithTransformation,
)
return RobertaSeriesModelWithTransformation
else:
raise ValueError(f"{model_class} is not supported.")
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--controlnet_model_name_or_path",
type=str,
default=None,
help="Path to pretrained controlnet model or model identifier from huggingface.co/models."
" If not specified controlnet weights are initialized from unet.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help=(
"Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be"
" float32 precision."
),
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--output_dir",
type=str,
default="controlnet-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
"In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
"Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
"See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
"instructions."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-6,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="wandb",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--wandb_key",
type=str,
default=None,
help=("If report to option is set to wandb, api-key for wandb used for login to wandb "),
)
parser.add_argument(
"--wandb_project_name",
type=str,
default=None,
help=("If report to option is set to wandb, project name in wandb for log tracking "),
)
parser.add_argument(
"--wandb_run_name",
type=str,
default=None,
help=("If report to option is set to wandb, project name in wandb for log tracking "),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--set_grads_to_none",
action="store_true",
help=(
"Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
" behaviors, so disable this argument if it causes any problems. More info:"
" https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
),
)
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that π€ Datasets can understand."
),
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The config of the Dataset, leave as None if there's only one config.",
)
parser.add_argument(
"--train_data_dir",
type=str,
default=None,
help=(
"A folder containing the training data. Folder contents must follow the structure described in"
" https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
" must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
),
)
parser.add_argument(
"--image_column", type=str, default="image", help="The column of the dataset containing the target image."
)
parser.add_argument(
"--conditioning_image_column",
type=str,
default="conditioning_image",
help="The column of the dataset containing the controlnet conditioning image.",
)
parser.add_argument(
"--caption_column",
type=str,
default="text",
help="The column of the dataset containing a caption or a list of captions.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help=(
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
),
)
parser.add_argument(
"--proportion_empty_prompts",
type=float,
default=0,
help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
nargs="+",
help=(
"A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
" Provide either a matching number of `--validation_image`s, a single `--validation_image`"
" to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
),
)
parser.add_argument(
"--validation_image",
type=str,
default=None,
nargs="+",
help=(
"A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`"
" and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a"
" a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
" `--validation_image` that will be used with all `--validation_prompt`s."
),
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair",
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
help=(
"Run validation every X steps. Validation consists of running the prompt"
" `args.validation_prompt` multiple times: `args.num_validation_images`"
" and logging the images."
),
)
parser.add_argument(
"--tracker_project_name",
type=str,
default="train_controlnet",
help=(
"The `project_name` argument passed to Accelerator.init_trackers for"
" more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
),
)
# evaluation arguments
parser.add_argument("--controlnet_path", type=str, default=None, help="Path to pretrained controlnet.")
parser.add_argument("--unet_path", type=str, default=None, help="Path to pretrained unet.")
parser.add_argument("--adapter_name", type=str, default=None, help="Name of the adapter to use.")
parser.add_argument("--vis_overlays", action="store_true", help="Whether to visualize the landmarks.")
# self-invented arguments
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--name",
type=str,
help=("The name of the current experiment run, consists of [data]-[prompt]"),
)
# BOFT args
parser.add_argument("--use_boft", action="store_true", help="Whether to use BOFT for parameter efficient tuning")
parser.add_argument("--boft_block_num", type=int, default=8, help="The number of BOFT blocks")
parser.add_argument("--boft_block_size", type=int, default=0, help="The size of BOFT blocks")
parser.add_argument("--boft_n_butterfly_factor", type=int, default=0, help="The number of butterfly factors")
parser.add_argument("--boft_dropout", type=float, default=0.1, help="BOFT dropout, only used if use_boft is True")
parser.add_argument(
"--boft_bias",
type=str,
default="none",
help="Bias type for BOFT. Can be 'none', 'all' or 'boft_only', only used if use_boft is True",
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.dataset_name is None and args.train_data_dir is None:
raise ValueError("Specify either `--dataset_name` or `--train_data_dir`")
if args.dataset_name is not None and args.train_data_dir is not None:
raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`")
if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")
if args.validation_prompt is not None and args.validation_image is None:
raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")
if args.validation_prompt is None and args.validation_image is not None:
raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")
if (
args.validation_image is not None
and args.validation_prompt is not None
and len(args.validation_image) != 1
and len(args.validation_prompt) != 1
and len(args.validation_image) != len(args.validation_prompt)
):
raise ValueError(
"Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
" or the same number of `--validation_prompt`s and `--validation_image`s"
)
if args.resolution % 8 != 0:
raise ValueError(
"`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the controlnet encoder."
)
return args
|
peft/examples/boft_controlnet/utils/args_loader.py/0
|
{
"file_path": "peft/examples/boft_controlnet/utils/args_loader.py",
"repo_id": "peft",
"token_count": 7255
}
| 190
|
import gc
import threading
import psutil
import torch
# Converting Bytes to Megabytes
def b2mb(x):
return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
def __enter__(self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
self.process = psutil.Process()
self.cpu_begin = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
return self
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_peak = -1
while True:
self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def __exit__(self, *exc):
self.peak_monitoring = False
gc.collect()
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
self.used = b2mb(self.end - self.begin)
self.peaked = b2mb(self.peak - self.begin)
self.cpu_end = self.cpu_mem_used()
self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)
self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
|
peft/examples/boft_dreambooth/utils/tracemalloc.py/0
|
{
"file_path": "peft/examples/boft_dreambooth/utils/tracemalloc.py",
"repo_id": "peft",
"token_count": 786
}
| 191
|
import os
import torch
from accelerate import Accelerator
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup
from peft import LoraConfig, TaskType, get_peft_model
from peft.utils.other import fsdp_auto_wrap_policy
def main():
accelerator = Accelerator()
model_name_or_path = "t5-base"
batch_size = 8
text_column = "sentence"
label_column = "label"
max_length = 64
lr = 1e-3
num_epochs = 1
base_path = "temp/data/FinancialPhraseBank-v1.0"
peft_config = LoraConfig(
task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
accelerator.print(model.print_trainable_parameters())
dataset = load_dataset(
"json",
data_files={
"train": os.path.join(base_path, "financial_phrase_bank_train.jsonl"),
"validation": os.path.join(base_path, "financial_phrase_bank_val.jsonl"),
},
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[label_column]
model_inputs = tokenizer(
inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
)
labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt")
labels = labels["input_ids"]
labels[labels == tokenizer.pad_token_id] = -100  # ignore padding when computing the loss
model_inputs["labels"] = labels
return model_inputs
with accelerator.main_process_first():
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=(len(train_dataloader) * num_epochs),
)
if getattr(accelerator.state, "fsdp_plugin", None) is not None:
accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model)
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare(
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler
)
accelerator.print(model)
for epoch in range(num_epochs):
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
eval_loss = 0
eval_preds = []
for step, batch in enumerate(tqdm(eval_dataloader)):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
eval_loss += loss.detach().float()
preds = accelerator.gather_for_metrics(torch.argmax(outputs.logits, -1)).detach().cpu().numpy()
eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
eval_epoch_loss = eval_loss / len(eval_dataloader)
eval_ppl = torch.exp(eval_epoch_loss)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"][label_column]):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total * 100
accelerator.print(f"{accuracy=}")
accelerator.print(f"{eval_preds[:10]=}")
accelerator.print(f"{dataset['validation'][label_column][:10]=}")
accelerator.wait_for_everyone()
# Option1: Pushing the model to Hugging Face Hub
# model.push_to_hub(
# f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
# token = "hf_..."
# )
# token (`bool` or `str`, *optional*):
# `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated
# when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
# is not specified.
# Or you can get your token from https://huggingface.co/settings/token
# Option2: Saving the model locally
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_")
model.save_pretrained(peft_model_id)
accelerator.wait_for_everyone()
if __name__ == "__main__":
main()
|
peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py/0
|
{
"file_path": "peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py",
"repo_id": "peft",
"token_count": 2543
}
| 192
|
accelerate launch --config_file config.yaml peft_adalora_whisper_large_training.py \
--model_name_or_path "openai/whisper-large-v2" \
--language "Marathi" \
--language_abbr "mr" \
--task "transcribe" \
--dataset_name "mozilla-foundation/common_voice_11_0" \
--push_to_hub \
--preprocessing_num_workers 2 \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--dataloader_pin_memory \
--dataloader_num_workers 2 \
--learning_rate 1e-3 \
--weight_decay 1e-4 \
--num_train_epochs 3 \
--gradient_accumulation_steps 1 \
--lr_scheduler_type "linear" \
--num_warmup_steps 50 \
--output_dir "adalora_whisper_large_marathi_multi_adapter" \
--seed 42 \
--load_best_model \
--with_tracking \
--report_to "wandb" \
--hub_token $HUB_TOKEN \
--checkpointing_steps 2000 \
--evaluation_steps 2000 \
--logging_steps 25 \
--use_peft \
--use_adalora \
--init_r 12 \
--target_r 8 \
--tinit 100 \
--tfinal 800 \
--delta_t 10 \
--lora_alpha 32 \
--lora_dropout 0.1 \
--orth_reg_weight 0.5
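# A rough reading of the AdaLoRA-specific flags above (see the AdaLoRA paper and the
# AdaLoraConfig docs for the authoritative definitions): --init_r is the initial
# per-matrix rank, --target_r the target average rank after pruning, --tinit and
# --tfinal the warmup and final fine-tuning portions of the budget schedule
# (in steps), and --delta_t the interval between rank-budget reallocations.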
|
peft/examples/int8_training/run_adalora_whisper_int8.sh/0
|
{
"file_path": "peft/examples/int8_training/run_adalora_whisper_int8.sh",
"repo_id": "peft",
"token_count": 509
}
| 193
|
<jupyter_start><jupyter_text>Dreambooth with OFT. This notebook assumes that you have already run the train_dreambooth.py script to create your own adapter.<jupyter_code>from diffusers import DiffusionPipeline
from diffusers.utils import check_min_version, get_logger
from peft import PeftModel
# Will raise an error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.10.0.dev0")
logger = get_logger(__name__)
BASE_MODEL_NAME = "stabilityai/stable-diffusion-2-1-base"
ADAPTER_MODEL_PATH = "INSERT MODEL PATH HERE"
pipe = DiffusionPipeline.from_pretrained(
BASE_MODEL_NAME,
)
pipe.to("cuda")
pipe.unet = PeftModel.from_pretrained(pipe.unet, ADAPTER_MODEL_PATH + "/unet", adapter_name="default")
pipe.text_encoder = PeftModel.from_pretrained(
pipe.text_encoder, ADAPTER_MODEL_PATH + "/text_encoder", adapter_name="default"
)
prompt = "A photo of a sks dog"
image = pipe(
prompt,
num_inference_steps=50,
height=512,
width=512,
).images[0]
image<jupyter_output><empty_output>
|
peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb/0
|
{
"file_path": "peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb",
"repo_id": "peft",
"token_count": 376
}
| 194
|
import argparse
import evaluate
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from peft import (
PrefixTuningConfig,
PromptEncoderConfig,
PromptTuningConfig,
get_peft_model,
)
from peft.utils.other import fsdp_auto_wrap_policy
def parse_args():
parser = argparse.ArgumentParser(description="PEFT a transformers model on a sequence classification task")
parser.add_argument(
"--num_virtual_tokens",
type=int,
default=20,
help="num_virtual_tokens if the number of virtual tokens used in prompt/prefix/P tuning.",
)
parser.add_argument(
"--encoder_hidden_size",
type=int,
default=128,
help="encoder_hidden_size if the encoder hidden size used in P tuninig/Prefix tuning.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-3,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--peft_type",
type=str,
default="p_tuning",
help="The PEFT type to use.",
choices=["p_tuning", "prefix_tuning", "prompt_tuning"],
)
args = parser.parse_args()
assert args.output_dir is not None, "Need an `output_dir` to store the fine-tuned model."
return args
def main():
args = parse_args()
ddp_scaler = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
task = "mrpc"
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
if args.peft_type == "p_tuning":
peft_config = PromptEncoderConfig(
task_type="SEQ_CLS",
num_virtual_tokens=args.num_virtual_tokens,
encoder_hidden_size=args.encoder_hidden_size,
)
elif args.peft_type == "prefix_tuning":
peft_config = PrefixTuningConfig(
task_type="SEQ_CLS",
num_virtual_tokens=args.num_virtual_tokens,
encoder_hidden_size=args.encoder_hidden_size,
)
else:
peft_config = PromptTuningConfig(task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens)
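# Rough intuition for the three branches above: p-tuning learns continuous prompts
# through a small prompt encoder (hence `encoder_hidden_size`), prefix tuning
# prepends trainable key/value prefixes to every attention layer, and prompt tuning
# only learns virtual token embeddings at the input layer.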
tokenizer_kwargs = {}
if any(k in args.model_name_or_path for k in ("gpt", "opt", "bloom")):
tokenizer_kwargs["padding_side"] = "left"
else:
tokenizer_kwargs["padding_side"] = "right"
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, **tokenizer_kwargs)
if getattr(tokenizer, "pad_token_id") is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
def collate_fn(examples):
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=args.per_device_eval_batch_size,
)
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
if getattr(accelerator.state, "fsdp_plugin", None) is not None:
accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model)
model = accelerator.prepare(model)
optimizer = AdamW(params=model.parameters(), lr=args.learning_rate)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=(len(train_dataloader) * args.num_train_epochs),
)
if getattr(accelerator.state, "fsdp_plugin", None) is not None:
train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare(
train_dataloader, eval_dataloader, optimizer, lr_scheduler
)
else:
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare(
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler
)
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(tqdm(train_dataloader)):
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
samples_seen = 0
for step, batch in enumerate(tqdm(eval_dataloader)):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather((predictions, batch["labels"]))
# If we are in a multiprocess environment, the last batch has duplicates
if accelerator.num_processes > 1:
if step == len(eval_dataloader) - 1:
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
accelerator.print(f"epoch {epoch}:", eval_metric)
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, state_dict=accelerator.get_state_dict(model))
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if __name__ == "__main__":
main()
|
peft/examples/sequence_classification/peft_no_lora_accelerate.py/0
|
{
"file_path": "peft/examples/sequence_classification/peft_no_lora_accelerate.py",
"repo_id": "peft",
"token_count": 3361
}
| 195
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import importlib
import os
from typing import Optional
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoTokenizer,
)
from .config import PeftConfig
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
from .peft_model import (
PeftModel,
PeftModelForCausalLM,
PeftModelForFeatureExtraction,
PeftModelForQuestionAnswering,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
)
from .utils.constants import TOKENIZER_CONFIG_NAME
from .utils.other import check_file_exists_on_hf_hub
class _BaseAutoPeftModel:
_target_class = None
_target_peft_class = None
def __init__(self, *args, **kwargs):
# For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400
raise EnvironmentError( # noqa: UP024
f"{self.__class__.__name__} is designed to be instantiated "
f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
f"`{self.__class__.__name__}.from_config(config)` methods."
)
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path,
adapter_name: str = "default",
is_trainable: bool = False,
config: Optional[PeftConfig] = None,
revision: Optional[str] = None,
**kwargs,
):
r"""
A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs
are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and
the config object init.
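Example (a minimal, hypothetical sketch; the repo id below is a placeholder, not a
real checkpoint):

```py
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained("some-user/some-lora-adapter")
```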
"""
peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs)
base_model_path = peft_config.base_model_name_or_path
base_model_revision = peft_config.revision
task_type = getattr(peft_config, "task_type", None)
if cls._target_class is not None:
target_class = cls._target_class
elif cls._target_class is None and task_type is not None:
# this is only in the case where we use `AutoPeftModel`
raise ValueError(
"Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)"
)
if task_type is not None:
expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
if cls._target_peft_class.__name__ != expected_target_class.__name__:
raise ValueError(
f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }"
" make sure that you are loading the correct model for your task type."
)
elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None:
auto_mapping = getattr(peft_config, "auto_mapping", None)
base_model_class = auto_mapping["base_model_class"]
parent_library_name = auto_mapping["parent_library"]
parent_library = importlib.import_module(parent_library_name)
target_class = getattr(parent_library, base_model_class)
else:
raise ValueError(
"Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type."
)
base_model = target_class.from_pretrained(base_model_path, revision=base_model_revision, **kwargs)
tokenizer_exists = False
if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
tokenizer_exists = True
else:
token = kwargs.get("token", None)
if token is None:
token = kwargs.get("use_auth_token", None)
tokenizer_exists = check_file_exists_on_hf_hub(
repo_id=pretrained_model_name_or_path,
filename=TOKENIZER_CONFIG_NAME,
revision=revision,
repo_type=kwargs.get("repo_type", None),
token=token,
)
if tokenizer_exists:
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False)
)
base_model.resize_token_embeddings(len(tokenizer))
return cls._target_peft_class.from_pretrained(
base_model,
pretrained_model_name_or_path,
adapter_name=adapter_name,
is_trainable=is_trainable,
config=config,
**kwargs,
)
class AutoPeftModel(_BaseAutoPeftModel):
_target_class = None
_target_peft_class = PeftModel
class AutoPeftModelForCausalLM(_BaseAutoPeftModel):
_target_class = AutoModelForCausalLM
_target_peft_class = PeftModelForCausalLM
class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel):
_target_class = AutoModelForSeq2SeqLM
_target_peft_class = PeftModelForSeq2SeqLM
class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel):
_target_class = AutoModelForSequenceClassification
_target_peft_class = PeftModelForSequenceClassification
class AutoPeftModelForTokenClassification(_BaseAutoPeftModel):
_target_class = AutoModelForTokenClassification
_target_peft_class = PeftModelForTokenClassification
class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel):
_target_class = AutoModelForQuestionAnswering
_target_peft_class = PeftModelForQuestionAnswering
class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel):
_target_class = AutoModel
_target_peft_class = PeftModelForFeatureExtraction
|
peft/src/peft/auto.py/0
|
{
"file_path": "peft/src/peft/auto.py",
"repo_id": "peft",
"token_count": 2741
}
| 196
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, List, Optional
import packaging.version
import torch
import transformers
from torch import nn
from peft.tuners.lora import LoraLayer
from peft.tuners.tuners_utils import check_adapters_to_merge
from peft.utils import transpose
if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"):
from transformers.integrations import deepspeed_config
else:
from transformers.deepspeed import deepspeed_config
class AdaLoraLayer(LoraLayer):
# List all names of layers that may contain adapter weights
# Note: ranknum doesn't need to be included as it is not an nn.Module
adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout", "ranknum")
def __init__(self, base_layer: nn.Module) -> None:
super().__init__(base_layer)
self.lora_E = nn.ParameterDict({})
self.lora_A = nn.ParameterDict({})
self.lora_B = nn.ParameterDict({})
self.ranknum = nn.ParameterDict({})
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
if r < 0:
# note: r == 0 is allowed for AdaLora, see #1539
raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}")
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
# Actual trainable parameters
# Right singular vectors
self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features))
# Singular values
self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1))
# Left singular vectors
self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r))
# The current rank
self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False)
self.ranknum[adapter_name].data.fill_(float(r))
self.ranknum[adapter_name].requires_grad = False
self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
if init_lora_weights:
self.reset_lora_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_lora_parameters(self, adapter_name):
if adapter_name in self.lora_A.keys():
nn.init.zeros_(self.lora_E[adapter_name])
nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
class SVDLinear(nn.Module, AdaLoraLayer):
# SVD-based adaptation by a dense layer
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
fan_in_fan_out: bool = False,
init_lora_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
# Freezing the pre-trained weight matrix
self.get_base_layer().weight.requires_grad = False
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
base_layer = self.get_base_layer()
if active_adapter in self.lora_A.keys():
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
return (
transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out)
* self.scaling[adapter]
/ (self.ranknum[adapter] + 1e-5)
)
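# In other words, the SVD-style parametrization yields
# delta_W = B @ (A * E) * scaling / ranknum, where the column vector E plays the
# role of singular values that AdaLoRA can zero out to prune ranks without changing
# the shapes of A and B.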
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-5
x = x.to(lora_A.dtype)
result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "adalora." + rep
class RankAllocator:
"""
The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY
Args:
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
model: the model that we apply AdaLoRA to.
"""
def __init__(self, model, peft_config, adapter_name):
self.peft_config = peft_config
self.adapter_name = adapter_name
self.beta1 = peft_config.beta1
self.beta2 = peft_config.beta2
assert self.beta1 > 0 and self.beta1 < 1
assert self.beta2 > 0 and self.beta2 < 1
self.reset_ipt()
self._set_budget_scheduler(model)
def set_total_step(self, total_step):
self.peft_config.total_step = total_step
def reset_ipt(self):
self.ipt = {}
self.exp_avg_ipt = {}
self.exp_avg_unc = {}
def _set_budget_scheduler(self, model):
self.init_bgt = 0
self.name_set = set()
for n, p in model.named_parameters():
if f"lora_A.{self.adapter_name}" in n:
self.init_bgt += p.size(0)
self.name_set.add(n.replace("lora_A", "%s"))
self.name_set = sorted(self.name_set)
# The total final rank budget
self.target_bgt = self.peft_config.target_r * len(self.name_set)
def budget_schedule(self, step: int):
tinit = self.peft_config.tinit
tfinal = self.peft_config.tfinal
total_step = self.peft_config.total_step
# Initial warmup
if step <= tinit:
budget = self.init_bgt
mask_ind = False
# Final fine-tuning
elif step > total_step - tfinal:
budget = self.target_bgt
mask_ind = True
else:
# Budget decreasing with a cubic scheduler
mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt)
mask_ind = True if step % self.peft_config.deltaT == 0 else False
return budget, mask_ind
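# Illustration of the cubic schedule above with made-up numbers: for tinit=100,
# tfinal=800, total_step=3000, init_bgt=1152 and target_bgt=768, step=1550 gives
# mul_coeff = 1 - (1550 - 100) / (3000 - 800 - 100) ≈ 0.31, hence
# budget ≈ int(384 * 0.31**3 + 768) = 779, decaying smoothly toward target_bgt.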
def update_ipt(self, model):
# Update the sensitivity and uncertainty for every weight
for n, p in model.named_parameters():
if "lora_" in n and self.adapter_name in n:
if n not in self.ipt:
self.ipt[n] = torch.zeros_like(p)
self.exp_avg_ipt[n] = torch.zeros_like(p)
self.exp_avg_unc[n] = torch.zeros_like(p)
with torch.no_grad():
if deepspeed_config() is not None:
import deepspeed
grad = deepspeed.utils.safe_get_full_grad(p)
self.ipt[n] = (p * grad).abs().detach()
else:
self.ipt[n] = (p * p.grad).abs().detach()
# Sensitivity smoothing
self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
# Uncertainty quantification
self.exp_avg_unc[n] = (
self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
)
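# In the AdaLoRA paper's terms: self.ipt holds the sensitivity |w * grad(w)|,
# exp_avg_ipt its exponential moving average (smoothed by beta1), and exp_avg_unc
# an EMA of the deviation |ipt - exp_avg_ipt| (smoothed by beta2), which serves as
# an uncertainty estimate; the two are combined in _element_score below.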
def _element_score(self, n):
return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
def _combine_ipt(self, ipt_E, ipt_AB):
ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
return sum_ipt
def mask_to_budget(self, model, budget):
value_ipt = {}
vector_ipt = {}
triplet_ipt = {}
# Get the importance score for A, E, B
for n, p in model.named_parameters():
if f"lora_A.{self.adapter_name}" in n:
entry_ipt = self._element_score(n)
comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
name_m = n.replace("lora_A", "%s")
if name_m not in vector_ipt:
vector_ipt[name_m] = [comb_ipt]
else:
vector_ipt[name_m].append(comb_ipt)
if f"lora_B.{self.adapter_name}" in n:
entry_ipt = self._element_score(n)
comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
name_m = n.replace("lora_B", "%s")
if name_m not in vector_ipt:
vector_ipt[name_m] = [comb_ipt]
else:
vector_ipt[name_m].append(comb_ipt)
if f"lora_E.{self.adapter_name}" in n:
entry_ipt = self._element_score(n)
name_m = n.replace("lora_E", "%s")
value_ipt[name_m] = entry_ipt
all_score = []
# Calculate the score for each triplet
for name_m in vector_ipt:
ipt_E = value_ipt[name_m]
ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
name_E = name_m % "lora_E"
triplet_ipt[name_E] = sum_ipt.view(-1, 1)
all_score.append(sum_ipt.view(-1))
# Get the threshold by ranking ipt
mask_threshold = torch.kthvalue(
torch.cat(all_score),
k=self.init_bgt - budget,
)[0].item()
rank_pattern = {}
# Mask the unimportant triplets
with torch.no_grad():
for n, p in model.named_parameters():
if f"lora_E.{self.adapter_name}" in n:
p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
return rank_pattern
def update_and_allocate(self, model, global_step, force_mask=False):
# Update the importance score and allocate the budget
if global_step < self.peft_config.total_step - self.peft_config.tfinal:
self.update_ipt(model)
budget, mask_ind = self.budget_schedule(global_step)
# Allocate the budget according to importance scores
if mask_ind or force_mask:
rank_pattern = self.mask_to_budget(model, budget)
else:
rank_pattern = None
return budget, rank_pattern
def mask_using_rank_pattern(self, model, rank_pattern):
# Mask the unimportant triplets
is_adapter_name_truncated = False
if self.adapter_name not in next(iter(rank_pattern.keys())):
is_adapter_name_truncated = True
with torch.no_grad():
for n, p in model.named_parameters():
if f"lora_E.{self.adapter_name}" in n:
key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "")
mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
p.masked_fill_(~mask.bool(), 0.0)
|
peft/src/peft/tuners/adalora/layer.py/0
|
{
"file_path": "peft/src/peft/tuners/adalora/layer.py",
"repo_id": "peft",
"token_count": 7167
}
| 197
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class FourierFTLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("fourierft_spectrum",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("fourierft_n_frequency", "fourierft_scaling", "fourierft_random_loc_seed")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.fourierft_n_frequency = {}
self.fourierft_scaling = {}
self.fourierft_spectrum = nn.ParameterDict({})
self.indices = {}
self.fourierft_random_loc_seed = {}
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, Conv1D):
self.in_features, self.out_features = (
base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
)
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def update_layer(self, adapter_name, n_frequency, scaling, init_weights, random_loc_seed):
if n_frequency <= 0:
raise ValueError(f"`n_frequency` should be a positive integer value but the value passed is {n_frequency}")
if n_frequency > self.in_features * self.out_features:
raise ValueError(
f"`n_frequency` should be less than or equal to the product of the input and output dimensions "
f"but the value passed is {n_frequency} and the product is {self.in_features * self.out_features}"
)
self.fourierft_n_frequency[adapter_name] = n_frequency
self.fourierft_random_loc_seed[adapter_name] = random_loc_seed
self.indices[adapter_name] = torch.randperm(
self.out_features * self.in_features,
generator=torch.Generator().manual_seed(self.fourierft_random_loc_seed[adapter_name]),
)[:n_frequency]
self.indices[adapter_name] = torch.stack(
[self.indices[adapter_name] // self.in_features, self.indices[adapter_name] % self.in_features], dim=0
)
self.fourierft_scaling[adapter_name] = scaling
# Actual trainable parameters
self.fourierft_spectrum[adapter_name] = nn.Parameter(torch.randn(n_frequency), requires_grad=True)
if init_weights:
self.reset_fourier_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
@torch.no_grad()
def reset_fourier_parameters(self, adapter_name):
if adapter_name in self.fourierft_spectrum.keys():
nn.init.zeros_(self.fourierft_spectrum[adapter_name])
def get_delta_weight(self, adapter) -> torch.Tensor:
spectrum = self.fourierft_spectrum[adapter]
indices = self.indices[adapter].to(spectrum.device)
dense_spectrum = torch.zeros(self.out_features, self.in_features, device=spectrum.device, dtype=spectrum.dtype)
dense_spectrum[indices[0, :], indices[1, :]] = spectrum
delta_weight = torch.fft.ifft2(dense_spectrum).real * self.fourierft_scaling[adapter]
return delta_weight
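# Sketch of the mechanism: only n_frequency spectral coefficients are trainable;
# they are scattered into a dense (out_features, in_features) spectrum at fixed
# random locations, and the real part of an inverse 2D FFT of that spectrum,
# times fourierft_scaling, becomes the dense weight update.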
class FourierFTLinear(nn.Module, FourierFTLayer):
# FourierFT implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
n_frequency: int = 1000,
scaling: float = 150.0,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: Union[bool, str] = False,
random_loc_seed: int = 777,
**kwargs,
) -> None:
super().__init__()
FourierFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, n_frequency, scaling, init_weights, random_loc_seed)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.fourierft_spectrum.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.fourierft_spectrum.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
return super().get_delta_weight(adapter)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.fourierft_spectrum.keys():
continue
delta_w = self.get_delta_weight(active_adapter)
x = x.to(delta_w.dtype)
result = result + F.linear(x, delta_w)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "fourierft." + rep
|
peft/src/peft/tuners/fourierft/layer.py/0
|
{
"file_path": "peft/src/peft/tuners/fourierft/layer.py",
"repo_id": "peft",
"token_count": 3638
}
| 198
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from accelerate.utils.imports import is_xpu_available
from torch import svd_lowrank
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import dequantize_module_weight, gather_params_ctx
from peft.utils.other import transpose
from .config import LoraConfig
from .dora import DoraConv2dLayer, DoraEmbeddingLayer, DoraLinearLayer
class LoraLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout")
def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool = False, **kwargs) -> None:
self.base_layer = base_layer
self.r = {}
self.lora_alpha = {}
self.scaling = {}
self.lora_dropout = nn.ModuleDict({})
self.lora_A = nn.ModuleDict({})
self.lora_B = nn.ModuleDict({})
# For Embedding layer
self.lora_embedding_A = nn.ParameterDict({})
self.lora_embedding_B = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.use_dora: dict[str, bool] = {}
self.lora_magnitude_vector = torch.nn.ModuleDict() # for DoRA
self._caches: dict[str, Any] = {}
self.ephemeral_gpu_offload: bool = ephemeral_gpu_offload
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, nn.Conv2d):
in_features, out_features = base_layer.in_channels, base_layer.out_channels
elif isinstance(base_layer, nn.Embedding):
in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
elif isinstance(base_layer, Conv1D):
in_features, out_features = (
base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
)
elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"):
# QuantLinear
in_features, out_features = base_layer.infeatures, base_layer.outfeatures
elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"):
# Megatron ColumnParallelLinear, RowParallelLinear
in_features, out_features = base_layer.input_size, base_layer.output_size
elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear":
# AQLM QuantLinear
in_features, out_features = base_layer.in_features, base_layer.out_features
elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM":
# Awq layers
in_features, out_features = base_layer.in_features, base_layer.out_features
elif base_layer.__class__.__name__ == "EetqLinear":
# Eetq layers
in_features, out_features = base_layer.in_features, base_layer.out_features
elif hasattr(base_layer, "W_q") and base_layer.__class__.__name__ == "HQQLinear":
# HQQ layers
in_features, out_features = base_layer.in_features, base_layer.out_features
else:
# possibly support user provided custom layer types using dynamic dispatch
if hasattr(base_layer, "in_features") and hasattr(base_layer, "out_features"):
in_features, out_features = base_layer.in_features, base_layer.out_features
else:
in_features, out_features = None, None
warnings.warn(
f"Unsupported layer type '{type(base_layer)}' encountered, proceed at your own risk.", UserWarning
)
self.in_features = in_features
self.out_features = out_features
def update_layer(
self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False
):
# This code works for linear layers, override for other layer types
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
# Actual trainable parameters
self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False)
self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False)
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
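# Note on rsLoRA (hedged): dividing lora_alpha by sqrt(r) instead of r keeps the
# magnitude of the LoRA update roughly stable as the rank grows, following the
# rank-stabilized LoRA paper.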
# for inits that require access to the base weight, use gather_params_ctx so that the weight is gathered when using DeepSpeed
if isinstance(init_lora_weights, str) and init_lora_weights.startswith("pissa"):
with gather_params_ctx(self.get_base_layer().weight):
self.pissa_init(adapter_name, init_lora_weights)
elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == "olora":
with gather_params_ctx(self.get_base_layer().weight):
self.olora_init(adapter_name)
elif init_lora_weights == "loftq":
with gather_params_ctx(self.get_base_layer().weight):
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
# call this before dora_init
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def reset_lora_parameters(self, adapter_name, init_lora_weights):
if init_lora_weights is False:
return
if adapter_name in self.lora_A.keys():
if init_lora_weights is True:
# initialize A the same way as the default for nn.Linear and B to zero
# https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124
nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
elif init_lora_weights.lower() == "gaussian":
nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name])
else:
raise ValueError(f"Unknown initialization {init_lora_weights=}")
nn.init.zeros_(self.lora_B[adapter_name].weight)
if adapter_name in self.lora_embedding_A.keys():
# Initialize A to zeros and B the same way as the default for nn.Embedding, see:
# https://github.com/microsoft/LoRA/blob/4c0333854cb905966f8cc4e9a74068c1e507c7b7/loralib/layers.py#L59-L60
nn.init.zeros_(self.lora_embedding_A[adapter_name])
nn.init.normal_(self.lora_embedding_B[adapter_name])
def olora_init(self, adapter_name):
dtype = self.get_base_layer().weight.dtype
if dtype in [torch.int8, torch.uint8]:
weight_tensor = dequantize_module_weight(self.get_base_layer())
elif dtype in [torch.float32, torch.float16, torch.bfloat16]:
weight_tensor = self.get_base_layer().weight
else:
raise TypeError(f"Unsupported data type for the base layer. Got {dtype}.")
scale_factor = self.scaling[adapter_name]
r = self.r[adapter_name]
weight_tensor = weight_tensor.to(torch.float32)
Q, R = torch.linalg.qr(weight_tensor.data)
Qr, Rr = Q[:, :r], R[:r]
self.lora_A[adapter_name].weight.data = Rr.contiguous()
self.lora_B[adapter_name].weight.data = Qr.contiguous()
weight_tensor.data -= scale_factor * self.lora_B[adapter_name].weight @ self.lora_A[adapter_name].weight
weight_tensor = weight_tensor.to(dtype)
self.get_base_layer().weight.data = weight_tensor
def pissa_init(self, adapter_name, init_lora_weights):
weight = self.get_base_layer().weight
dtype = weight.dtype
if dtype not in [torch.float32, torch.float16, torch.bfloat16]:
raise TypeError(
"Please initialize PiSSA under float32, float16, or bfloat16. "
"Subsequently, re-quantize the residual model to help minimize quantization errors."
)
weight = weight.to(torch.float32)
if init_lora_weights == "pissa":
# USV^T = W <-> VSU^T = W^T, where W^T = weight.data in R^{out_channel, in_channel},
V, S, Uh = torch.linalg.svd(weight.data, full_matrices=False)
Vr = V[:, : self.r[adapter_name]]
Sr = S[: self.r[adapter_name]]
Sr /= self.scaling[adapter_name]
Uhr = Uh[: self.r[adapter_name]]
elif len(init_lora_weights.split("_niter_")) == 2:
Vr, Sr, Ur = svd_lowrank(
weight.data, self.r[adapter_name], niter=int(init_lora_weights.split("_niter_")[-1])
)
Sr /= self.scaling[adapter_name]
Uhr = Ur.t()
else:
raise ValueError(
f"init_lora_weights should be 'pissa' or 'pissa_niter_[number of iters]', got {init_lora_weights} instead."
)
lora_A = torch.diag(torch.sqrt(Sr)) @ Uhr
lora_B = Vr @ torch.diag(torch.sqrt(Sr))
self.lora_A[adapter_name].weight.data = lora_A
self.lora_B[adapter_name].weight.data = lora_B
weight = weight.data - self.scaling[adapter_name] * lora_B @ lora_A
weight = weight.to(dtype)
self.get_base_layer().weight.data = weight
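# Net effect of the PiSSA init above: the principal SVD components move into
# lora_A/lora_B while the base weight keeps only the residual, so at initialization
# base_weight + scaling * lora_B @ lora_A reproduces the original weight.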
def loftq_init(self, adapter_name):
from peft.utils.loftq_utils import loftq_init
weight = self.get_base_layer().weight
kwargs = {
"num_bits": self.kwargs.get("loftq_bits", 4),
"reduced_rank": self.r[adapter_name],
"num_iter": self.kwargs.get("loftq_iter", 1),
}
qweight, lora_A, lora_B = loftq_init(weight, **kwargs)
if adapter_name in self.lora_A.keys():
# initialize A the same way as the default for nn.Linear and B to zero
self.lora_A[adapter_name].weight.data = lora_A
self.lora_B[adapter_name].weight.data = lora_B
if adapter_name in self.lora_embedding_A.keys():
# initialize A the same way as the default for nn.Linear and B to zero
self.lora_embedding_A[adapter_name].weight.data = lora_A
self.lora_embedding_B[adapter_name].weight.data = lora_B
self.get_base_layer().weight.data = qweight
def dora_init(self, adapter_name: str) -> None:
if not self.lora_magnitude_vector:
# first dora layer being added, add lora_magnitude_vector to the list of learnable parameters
self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",)
dora_layer = DoraLinearLayer(fan_in_fan_out=getattr(self, "fan_in_fan_out", False))
lora_A = self.lora_A[adapter_name].weight
lora_B = self.lora_B[adapter_name].weight
place_on_cpu = self.ephemeral_gpu_offload and (lora_A.device.type == "cpu" or lora_B.device.type == "cpu")
if self.ephemeral_gpu_offload:
if lora_A.device.type in ["cuda", "xpu"]:
lora_B = lora_B.to(lora_A.device)
else:
if lora_B.device.type not in ["cuda", "xpu"]:
if is_xpu_available():
lora_B = lora_B.to("xpu")
else:
lora_B = lora_B.to("cuda")
lora_A = lora_A.to(lora_B.device)
scaling = self.scaling[adapter_name]
dora_layer.update_layer(
base_layer=self.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling, place_on_cpu=place_on_cpu
)
self.lora_magnitude_vector[adapter_name] = dora_layer
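# DoRA in brief (a hedged summary of the decomposition): the effective weight is
# m * (W + scaling * B @ A) / ||W + scaling * B @ A||, with the norm taken per
# column; the magnitude vector m is the extra trainable parameter owned by
# DoraLinearLayer.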
def _cache_store(self, key: str, value: Any) -> None:
self._caches[key] = value
def _cache_pop(self, key: str) -> Any:
value = self._caches.pop(key)
return value
def set_scale(self, adapter, scale):
if adapter not in self.scaling:
# Ignore the case where the adapter is not in the layer
return
self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter]
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
self.scaling[active_adapter] *= scale
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
if scale is None:
self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter]
else:
self.scaling[active_adapter] /= scale
def _check_forward_args(self, x, *args, **kwargs):
"""Check if the arguments are compatible with the configs and state of the model"""
adapter_names = kwargs.get("adapter_names", None)
if adapter_names is None:
return
if len(x) != len(adapter_names):
msg = (
"Length of `adapter_names` should be the same as the number of inputs, but got "
f"{len(adapter_names)} and {len(x)} respectively."
)
raise ValueError(msg)
if self.merged:
# It is unclear what would be the right thing to do if users pass adapter_names and there are merged
# adapters. Therefore, it is better to raise an error in this case.
msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
raise ValueError(msg)
unique_adapters = set(self.active_adapters)
for adapter_name in unique_adapters:
if self.use_dora.get(adapter_name, False):
msg = "Cannot pass `adapter_names` when DoRA is enabled."
raise ValueError(msg)
def _mixed_batch_forward(
self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
) -> torch.Tensor:
# This is a special method that handles the case when users pass the argument `adapter_names`. This is an
# extra argument that allows mixing different adapters in the same batch at inference time.
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
for i, active_adapter in enumerate(unique_adapters):
if active_adapter == "__base__":
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
# getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
# layer output
sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype)
lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling
result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype)
return result
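# A hedged usage sketch for the mixed-batch path: calling
# model(x, adapter_names=["adapter_a", "__base__", "adapter_b"]) routes each sample
# of the batch through the named adapter, with the reserved name "__base__"
# meaning "base model only, no adapter".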
# Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
# and modified to work with PyTorch FSDP
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
class Linear(nn.Module, LoraLayer):
# Lora implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
is_target_conv_1d_layer: bool = False,
init_lora_weights: Union[bool, str] = True,
use_rslora: bool = False,
use_dora: bool = False,
**kwargs,
) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.lora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
orig_weights += delta_weight
else:
# handle dora
# since delta_weight already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(orig_weights, transpose(delta_weight, self.fan_in_fan_out), scaling=1)
.detach()
)
# We need to cache weight_norm because it has to be based on the original weights. We
# cannot calculate it on the fly based on the merged weights when unmerging because it is a
# different value
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
orig_weights = dora_factor * (orig_weights + delta_weight)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
base_layer.weight.data += delta_weight
else:
# handle dora
# since delta_weight already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(
base_layer.weight, transpose(delta_weight, self.fan_in_fan_out), scaling=1
)
.detach()
)
# We need to cache weight_norm because it has to be based on the original weights. We
# cannot calculate it on the fly based on the merged weights when unmerging because it is a
# different value
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
new_weight = dora_factor * (base_layer.weight.data + delta_weight)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
weight = self.get_base_layer().weight
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
weight.data -= delta_weight
else:
weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight
weight.data = weight_orig
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.lora_B[adapter].weight.device
dtype = self.lora_B[adapter].weight.dtype
# In case users want to merge the adapter weights that are in
# (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
# (b)float16 because some CPUs have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_A[adapter].weight
weight_B = self.lora_B[adapter].weight
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.lora_A[adapter].weight.data = weight_A.to(dtype)
self.lora_B[adapter].weight.data = weight_B.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
result = result + lora_B(lora_A(dropout(x))) * scaling
else:
x = dropout(x)
result = result + self.lora_magnitude_vector[active_adapter](
x,
lora_A=lora_A,
lora_B=lora_B,
scaling=scaling,
base_layer=self.get_base_layer(),
)
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
class Embedding(nn.Module, LoraLayer):
# LoRA implemented in an Embedding layer
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: Union[bool, str] = True,
use_rslora: bool = False,
use_dora: bool = False,
**kwargs,
) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
)
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
# Actual trainable parameters
weight_A = torch.randn((r, self.in_features))
weight_B = torch.randn((self.out_features, r))
self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A)
self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B)
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if init_lora_weights == "loftq":
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
# call this before dora_init
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def dora_init(self, adapter_name: str) -> None:
if not self.lora_magnitude_vector:
# first dora layer being added, add lora_magnitude_vector to the list of learnable parameters
self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",)
dora_layer = DoraEmbeddingLayer(fan_in_fan_out=True)
lora_embedding_A = self.lora_embedding_A[adapter_name]
lora_embedding_B = self.lora_embedding_B[adapter_name]
scaling = self.scaling[adapter_name]
dora_layer.update_layer(
base_layer=self.get_base_layer(), lora_A=lora_embedding_A, lora_B=lora_embedding_B, scaling=scaling
)
self.lora_magnitude_vector[adapter_name] = dora_layer
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.lora_embedding_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_embedding_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.lora_embedding_B[adapter].device
dtype = self.lora_embedding_A[adapter].dtype
        # In case users want to merge adapter weights that are in (b)float16
        # while being on CPU, we need to cast the weights to float32, perform
        # the merge, and then cast back to (b)float16 because some CPUs have
        # slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_embedding_A[adapter]
weight_B = self.lora_embedding_B[adapter]
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
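        # The delta has to match base_layer.weight's (num_embeddings, embedding_dim)
        # layout, hence the transpose of B @ A (embeddings are treated as
        # fan_in_fan_out).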
output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.lora_embedding_A[adapter] = weight_A.to(dtype)
self.lora_embedding_B[adapter] = weight_B.to(dtype)
return output_tensor
def _mixed_batch_forward(
self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
) -> torch.Tensor:
# This is a special method that handles the case when users pass the argument `adapter_names`. This is an
# extra argument that allows mixing different adapters in the same batch at inference time.
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
for i, active_adapter in enumerate(unique_adapters):
if active_adapter == "__base__":
continue
if active_adapter not in self.lora_embedding_A.keys():
continue
embedding_A = self.lora_embedding_A[active_adapter].T
embedding_B = self.lora_embedding_B[active_adapter].T
scaling = self.scaling[active_adapter]
# getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
# layer output
sub_batch = x[sub_batch_indices_list[i]]
after_A = self._embed(sub_batch, embedding_A)
result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling
return result
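    # A hedged usage sketch of mixed-batch inference (illustrative only; the
    # adapter names are assumptions): `adapter_names` carries one entry per
    # sample, and "__base__" routes that sample through the base layer alone.
    #
    #     out = peft_model(input_ids, adapter_names=["adapter_a", "__base__", "adapter_b"])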
def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
base_layer = self.get_base_layer()
return F.embedding(
input,
weight,
padding_idx=base_layer.padding_idx,
max_norm=base_layer.max_norm,
norm_type=base_layer.norm_type,
scale_grad_by_freq=base_layer.scale_grad_by_freq,
sparse=base_layer.sparse,
)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
# TODO: no dtype conversion here, unlike in Linear, is that correct?
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_embedding_A:
continue
embedding_A = self.lora_embedding_A[active_adapter].T
embedding_B = self.lora_embedding_B[active_adapter].T
scaling = self.scaling[active_adapter]
if not self.use_dora[active_adapter]:
after_A = self._embed(x, embedding_A)
result = result + (after_A @ embedding_B) * scaling
else:
mag_norm_scale, dora_result = self.lora_magnitude_vector[active_adapter](
x,
lora_A=embedding_A,
lora_B=embedding_B,
scaling=scaling,
base_layer=self.get_base_layer(),
embed_fn=self._embed,
)
result = mag_norm_scale * result + dora_result
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
class Conv2d(nn.Module, LoraLayer):
    # LoRA implemented in a Conv2d layer
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: Union[bool, str] = True,
use_rslora: bool = False,
use_dora: bool = False,
**kwargs,
) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
)
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
# Actual trainable parameters
base_layer = self.get_base_layer()
kernel_size = base_layer.kernel_size
stride = base_layer.stride
padding = base_layer.padding
self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False)
self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False)
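        # lora_A mirrors the base conv's kernel size, stride, and padding while
        # mapping in_features -> r channels; lora_B is a 1x1 conv mapping
        # r -> out_features, so their composition matches the base layer's
        # output shape.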
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if init_lora_weights == "loftq":
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
# call this before dora_init
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def dora_init(self, adapter_name: str) -> None:
if self.lora_magnitude_vector is None:
# first dora layer being added, add lora_magnitude_vector to the list of learnable parameters
self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",)
dora_layer = DoraConv2dLayer(fan_in_fan_out=False)
lora_A = self.lora_A[adapter_name].weight
lora_B = self.lora_B[adapter_name].weight
scaling = self.scaling[adapter_name]
dora_layer.update_layer(base_layer=self.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling)
self.lora_magnitude_vector[adapter_name] = dora_layer
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
        Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.lora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
orig_weights += delta_weight
else:
# handle dora
# since delta_weight already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(orig_weights, delta_weight, scaling=1)
.detach()
)
                        # We need to cache weight_norm because it has to be based on the original weights. We
                        # cannot calculate it on the fly based on the merged weights when unmerging, because it's
                        # a different value.
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
base_layer.weight.data += delta_weight
else:
# handle dora
# since delta_weight already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(base_layer.weight, delta_weight, scaling=1)
.detach()
)
                        # We need to cache weight_norm because it has to be based on the original weights. We
                        # cannot calculate it on the fly based on the merged weights when unmerging, because it's
                        # a different value.
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
weight = self.get_base_layer().weight
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
weight.data -= delta_weight
else:
weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight
weight.data = weight_orig
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.lora_B[adapter].weight.device
dtype = self.lora_A[adapter].weight.dtype
        # In case users want to merge adapter weights that are in (b)float16
        # while being on CPU, we need to cast the weights to float32, perform
        # the merge, and then cast back to (b)float16 because some CPUs have
        # slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_A[adapter].weight
weight_B = self.lora_B[adapter].weight
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
# https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117
if self.get_base_layer().weight.size()[2:4] == (1, 1):
# conv2d 1x1
            output_tensor = (
                weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)
            ).unsqueeze(2).unsqueeze(3) * self.scaling[adapter]
else:
            # general kernel (e.g. 3x3): compose lora_B and lora_A into a
            # single delta kernel via conv2d
output_tensor = (
F.conv2d(
weight_A.permute(1, 0, 2, 3),
weight_B,
).permute(1, 0, 2, 3)
* self.scaling[adapter]
)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.lora_A[adapter].weight.data = weight_A.to(dtype)
self.lora_B[adapter].weight.data = weight_B.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
result = result + lora_B(lora_A(dropout(x))) * scaling
else:
x = dropout(x)
result = result + self.lora_magnitude_vector[active_adapter](
x,
lora_A=lora_A,
lora_B=lora_B,
scaling=scaling,
base_layer=self.get_base_layer(),
)
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
def dispatch_default(
target: torch.nn.Module,
adapter_name: str,
lora_config: LoraConfig,
**kwargs,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Embedding):
embedding_kwargs = kwargs.copy()
embedding_kwargs.pop("fan_in_fan_out", None)
embedding_kwargs.update(lora_config.loftq_config)
new_module = Embedding(target, adapter_name, **embedding_kwargs)
elif isinstance(target_base_layer, torch.nn.Conv2d):
kwargs.update(lora_config.loftq_config)
new_module = Conv2d(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
kwargs.update(lora_config.loftq_config)
new_module = Linear(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, Conv1D):
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
kwargs.update(lora_config.loftq_config)
new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs)
return new_module
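# A hedged usage sketch (illustrative only; the base model and target module
# names below are assumptions, not part of this file). `get_peft_model` walks
# the model and calls dispatchers such as `dispatch_default` to wrap each
# matching layer in the corresponding LoRA class above:
#
#     from peft import LoraConfig, get_peft_model
#
#     config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
#     peft_model = get_peft_model(base_model, config)  # nn.Linear -> lora.Linear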
# === end of file: peft/src/peft/tuners/lora/layer.py ===
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Regression testing: check that checkpoints from previous PEFT versions still return the same values.
#
# For normal regression testing, just run:
#
# `pytest tests/regression/test_regression.py -s --regression`
#
# Add `-s` to show potentially useful debugging information. `--regression` is a custom marker that is required for
# regression tests not to be skipped.
#
# To create new regression tests, run:
# `HF_TOKEN=<token> REGRESSION_CREATION_MODE=True pytest tests/regression/test_regression.py -s --regression`
#
# This will *fail* if:
#
# 1. the git worktree is dirty
# 2. the git commit is not tagged
#
# Note: A Hugging Face Hub token is required to upload the regression artifacts to our
# https://huggingface.co/peft-internal-testing repo. This can be done by anyone with write access to the repo but
# apparently it is not possible to create a technical token with write access.
#
# This is important to ensure that the regression artifacts correspond to a specific released version of PEFT.
# Therefore, it is recommended to checkout the tag before running the regression tests, e.g. by running:
#
# `git checkout v0.1.0`
#
# To override these checks, run:
# `HF_TOKEN=<token> REGRESSION_CREATION_MODE=True REGRESSION_FORCE_MODE=True pytest tests/regression/test_regression.py -s --regression`
#
# In REGRESSION_CREATION_MODE, one directory will be created in tests/regression/<TEST_NAME>/<PEFT_VERSION>/ for each
# test. This will contain the saved adapter, as well as the output of the test of the model for that version.
#
# In normal testing mode, the saved adapter and output for each version found in the directory
# tests/regression/<TEST_NAME>/ will be loaded and compared to the current output.
#
# When implementing new tests, check the existing ones as well as the description in the docstring of RegressionTester.
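#
# A minimal test skeleton (a hedged sketch, not part of the original file; the
# names are illustrative) looks like:
#
#     class TestMyModel(RegressionTester):
#         def get_output(self, model):
#             input = torch.arange(90).reshape(9, 10).to(self.torch_device)
#             with torch.inference_mode():
#                 return model(input)
#
#         def load_base_model(self):
#             self.fix_seed()  # deterministic base model weights
#             return MyModel().to(self.torch_device)
#
#         def test_lora(self):
#             config = LoraConfig(r=8, init_lora_weights=False, target_modules=["lin0"])
#             model = get_peft_model(self.load_base_model(), config)
#             self.assert_results_equal_or_store(model, "lora_my_model")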
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import pytest
import torch
from huggingface_hub import snapshot_download, upload_folder
from torch import nn
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from transformers.pytorch_utils import Conv1D
import peft
from peft import (
AdaLoraConfig,
BOFTConfig,
IA3Config,
LNTuningConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
PeftModel,
VeraConfig,
get_peft_model,
)
from peft.utils import infer_device
PEFT_VERSION = peft.__version__
REGRESSION_DIR = tempfile.mkdtemp(prefix="peft_regression_")
HF_TOKEN = os.environ.get("HF_TOKEN")
# the repo has to be created manually once, it is not automatically created
HF_REPO = "peft-internal-testing/regression-tests"
@pytest.fixture(scope="session", autouse=True)
def setup_teardown():
# Use a pytest session-scoped fixture to setup and teardown exactly once per session. AFAICT, unittest does not
# provide such a feature
# download regression artifacts from Hugging Face Hub at the start
snapshot_download(
repo_id=HF_REPO,
local_dir=REGRESSION_DIR,
# Don't use symlink, because this prevents us from properly cleaning up the files once finished
local_dir_use_symlinks=False,
)
yield
# delete regression artifacts at the end of the test session; optionally, upload them first if in creation mode
creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False"))
if creation_mode:
# upload the regression directory to Hugging Face Hub, will overwrite by default
upload_folder(
repo_id=HF_REPO,
folder_path=REGRESSION_DIR,
token=HF_TOKEN,
)
shutil.rmtree(REGRESSION_DIR)
def strtobool(val):
"""Copied from distutils.util"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {val!r}")
# same as in ..testing_utils.py but cannot be imported
def require_torch_gpu(test_case):
"""
Decorator marking a test that requires a GPU. Will be skipped when no GPU is available.
    Copied from testing_utils.py.
"""
if not torch.cuda.is_available():
return unittest.skip("test requires GPU")(test_case)
else:
return test_case
# same as in ..testing_utils.py but cannot be imported
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed.
    Copied from testing_utils.py.
"""
try:
import bitsandbytes # noqa: F401
except ImportError:
return unittest.skip("test requires bitsandbytes")(test_case)
else:
return test_case
def save_output(output, name, force=False):
path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION)
filename = os.path.join(path, "output.pt")
if os.path.exists(filename) and not force:
return
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(filename) and force:
print(f"Overriding existing output in {filename}", file=sys.stderr)
torch.save(output, filename)
def save_model(model, name, force=False):
path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION)
filename = os.path.join(path, peft.utils.SAFETENSORS_WEIGHTS_NAME)
if os.path.exists(filename) and not force:
return
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(filename) and force:
print(f"Overriding existing model in {path}", file=sys.stderr)
model.save_pretrained(path)
def load_output(name):
filename = os.path.join(REGRESSION_DIR, name, "output.pt")
return torch.load(filename, map_location=infer_device())
@pytest.mark.regression
class RegressionTester(unittest.TestCase):
"""Base class for regression testing
    Child classes must call assert_results_equal_or_store and pass the model output, as well as a unique name that
    describes the setting (e.g. "lora_opt-350m_bnb_4bit"). They also need to implement get_output(model) to get the
    model output, and load_base_model() to load the base model. Don't forget to fix the seed in load_base_model.
"""
torch_device = infer_device()
def setUp(self):
self.tol = 1e-4
self.creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False"))
self.force_mode = strtobool(os.environ.get("REGRESSION_FORCE_MODE", "False"))
if self.force_mode and not self.creation_mode:
raise RuntimeError("REGRESSION_FORCE_MODE can only be used together with REGRESSION_CREATION_MODE")
if self.creation_mode:
self.check_clean_git_status(self.force_mode)
if HF_TOKEN is None:
raise RuntimeError("HF_TOKEN environment variable must be set in creation mode")
def fix_seed(self):
torch.manual_seed(0)
def check_clean_git_status(self, force):
"""Ensure that worktree is not dirty and version tag is checked out"""
# check that the worktree is clean
try:
subprocess.check_output(["git", "diff", "--quiet", "HEAD"])
except subprocess.CalledProcessError as exc:
if force:
print("Overriding despite dirty git worktree", file=sys.stderr)
else:
raise RuntimeError("Git worktree is dirty") from exc
# check that the commit is tagged
try:
subprocess.check_output(["git", "describe", "--exact-match", "HEAD"])
except subprocess.CalledProcessError as exc:
if force:
print("Overriding despite non-tagged commit", file=sys.stderr)
else:
raise RuntimeError("Git commit is not tagged") from exc
def assert_results_equal_or_store(self, model, name):
"""Check if the outputs are the same or save the outputs if in creation mode."""
if not self.creation_mode: # normal regression testing mode
self._assert_results_equal(name)
else:
output = self.get_output(model)
if not torch.isfinite(output).all():
raise RuntimeError(f"Model output for {name} is not finite")
output2 = self.get_output(model)
if not torch.allclose(output, output2):
raise RuntimeError(f"Model output for {name} is not deterministic")
save_output(output, name, force=self.force_mode)
save_model(model, name, force=self.force_mode)
def _assert_results_equal(self, name):
path = os.path.join(REGRESSION_DIR, name)
versions = os.listdir(path)
for version in versions: # each directory corresponds to a version
output_loaded = load_output(os.path.join(name, version))
base_model = self.load_base_model()
model = PeftModel.from_pretrained(base_model, os.path.join(path, version))
output = self.get_output(model)
assert torch.allclose(output_loaded, output, atol=self.tol, rtol=self.tol)
def get_output(self, model):
raise NotImplementedError
def load_base_model(self):
raise NotImplementedError
##############
# TEST CASES #
##############
class TestMlp(RegressionTester):
def get_output(self, model):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
with torch.inference_mode():
output = model(input)
return output
def load_base_model(self):
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.lin1 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.sm(X)
return X
self.fix_seed()
return MLP().to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_mlp")
def test_lora_dora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
use_dora=True,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_dora_mlp")
def test_adalora(self):
base_model = self.load_base_model()
config = AdaLoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_mlp")
def test_ia3(self):
base_model = self.load_base_model()
config = IA3Config(
init_ia3_weights=False,
target_modules=["lin0"],
feedforward_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_mlp")
def test_ia3_no_ff(self):
base_model = self.load_base_model()
config = IA3Config(
init_ia3_weights=False,
target_modules=["lin0"],
feedforward_modules=[],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_no_ff_mlp")
def test_loha(self):
# TODO
self.skipTest("Skipping LoHa for now because init is not seedable")
base_model = self.load_base_model()
config = LoHaConfig(
r=8,
init_weights=False,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "loha_mlp")
def test_lokr(self):
# TODO
self.skipTest("Skipping LoKr for now because init is not seedable")
base_model = self.load_base_model()
config = LoKrConfig(
r=8,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lokr_mlp")
def test_lora_modules_to_save(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["lin0"],
modules_to_save=["lin1"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_mlp_modules_to_save")
def test_boft(self):
base_model = self.load_base_model()
config = BOFTConfig(
boft_block_size=2,
target_modules=["lin0"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "boft_mlp")
def test_ln_tuning(self):
base_model = self.load_base_model()
config = LNTuningConfig(target_modules=["lin0"])
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ln_tuning_mlp")
def test_vera_tuning(self):
base_model = self.load_base_model()
config = VeraConfig(target_modules=["lin0"])
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "vera_tuning_mlp")
class TestLoraEmbConv1D(RegressionTester):
def get_output(self, model):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
with torch.inference_mode():
output = model(input)
return output
def load_base_model(self):
class ModelEmbConv1D(nn.Module):
def __init__(self):
super().__init__()
self.emb = nn.Embedding(100, 5)
self.conv1d = Conv1D(1, 5)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.emb(X)
X = self.conv1d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
self.fix_seed()
return ModelEmbConv1D().to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["emb", "conv1d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_emb_conv1d")
class TestLoraConv2D(RegressionTester):
def get_output(self, model):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
with torch.inference_mode():
output = model(input)
return output
def load_base_model(self):
class ModelConv2D(nn.Module):
def __init__(self):
super().__init__()
self.conv2d = nn.Conv2d(5, 10, 3)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float().reshape(2, 5, 3, 3)
X = self.conv2d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
self.fix_seed()
return ModelConv2D().to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_conv2d")
def test_ia3(self):
base_model = self.load_base_model()
config = IA3Config(
init_ia3_weights=False,
target_modules=["conv2d"],
feedforward_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_conv2d")
def test_loha(self):
# TODO
self.skipTest("Skipping LoHa for now because init is not seedable")
base_model = self.load_base_model()
config = LoHaConfig(
r=8,
init_weights=False,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "loha_conv2d")
def test_lokr(self):
# TODO
self.skipTest("Skipping LoKr for now because init is not seedable")
base_model = self.load_base_model()
config = LoKrConfig(
r=8,
init_weights=False,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lokr_conv2d")
def test_boft(self):
base_model = self.load_base_model()
config = BOFTConfig(
boft_block_size=3,
target_modules=["conv2d"],
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "boft_conv2d")
class TestOpt(RegressionTester):
def get_output(self, model):
input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device)
with torch.inference_mode():
output = model(input).logits
return output
def load_base_model(self):
self.fix_seed()
return AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to(self.torch_device)
def test_lora(self):
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_opt-350m")
def test_adalora(self):
base_model = self.load_base_model()
config = AdaLoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m")
def test_ia3(self):
base_model = self.load_base_model()
config = IA3Config(init_ia3_weights=False)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "ia3_opt-350m")
@require_torch_gpu
@require_bitsandbytes
class TestOpt8bitBnb(RegressionTester):
def get_output(self, model):
input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device)
with torch.inference_mode():
output = model(input).logits
return output
def load_base_model(self):
self.fix_seed()
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-350m",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
return model
def test_lora_8bit(self):
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_8bit")
def test_adalora(self):
# TODO
self.skipTest(
"Skipping AdaLora for now, getting TypeError: unsupported operand type(s) for +=: 'dict' and 'Tensor'"
)
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = AdaLoraConfig(
init_r=6,
target_r=4,
tinit=50,
tfinal=100,
deltaT=5,
beta1=0.3,
beta2=0.3,
orth_reg_weight=0.2,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m_8bit")
@require_torch_gpu
@require_bitsandbytes
class TestOpt4bitBnb(RegressionTester):
def get_output(self, model):
input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device)
with torch.inference_mode():
output = model(input).logits
return output
def load_base_model(self):
self.fix_seed()
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=False,
bnb_4bit_compute_dtype=torch.float32,
)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-350m",
quantization_config=bnb_config,
torch_dtype=torch.float32,
)
return model
def test_lora_4bit(self):
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_4bit")
def test_adalora(self):
# TODO
self.skipTest("Skipping AdaLora for now because of a bug, see #1113")
# Warning: bnb results can vary significantly depending on the GPU. Therefore, if there is a change in GPU used
# in the CI, the test can fail without any code change. In that case, delete the regression artifact and create
# a new one using the new GPU.
base_model = self.load_base_model()
config = AdaLoraConfig(
init_r=6,
target_r=4,
tinit=50,
tfinal=100,
deltaT=5,
beta1=0.3,
beta2=0.3,
orth_reg_weight=0.2,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m_4bit")
# === end of file: peft/tests/regression/test_regression.py ===
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import os
import re
import tempfile
import unittest
import pytest
import torch
from parameterized import parameterized
from torch import nn
from transformers import AutoModelForCausalLM
from peft import (
AdaLoraConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
OFTConfig,
PeftMixedModel,
PrefixTuningConfig,
get_peft_model,
)
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import infer_device
class SimpleNet(nn.Module):
def __init__(self, bias=True):
super().__init__()
# note: out_features must be > rank or else OFT will be an identity transform
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.lin1 = nn.Linear(20, 16, bias=bias)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
return X
def _param_name_func(testcase_func, param_num, params):
    # for parameterized tests in TestMixedAdapterTypes; produces names like
    # "test_target_first_layer_0_Lora_LoHa", or "..._Lora_x2" when both configs
    # share the same type
config0, config1 = params[0]
name0 = config0.__class__.__name__[: -len("Config")]
name1 = config1.__class__.__name__[: -len("Config")]
if name0 != name1:
return f"{testcase_func.__name__}_{param_num}_{name0}_{name1}"
return f"{testcase_func.__name__}_{param_num}_{name0}_x2"
class TestMixedAdapterTypes(unittest.TestCase):
torch_device = infer_device()
def _get_model(self, model_cls, peft_config=None, adapter_name=None, seed=0, mixed=True):
torch.manual_seed(0) # always use seed 0 for base model, seed for adapters may differ
base_model = model_cls().eval().to(self.torch_device)
if peft_config is None:
return base_model
torch.manual_seed(seed)
assert adapter_name is not None
peft_model = get_peft_model(base_model, peft_config, adapter_name=adapter_name, mixed=mixed)
return peft_model.eval().to(self.torch_device)
def _check_mixed_outputs(self, model_cls, config0, config1, input, *, is_commutative):
# This test checks different combinations of adapter0, adapter1, or combinations of the two, and whether
# outputs are the same/different, depending on context. If we pass is_commutative=True, it means that the order
# of adapters does not matter, and we expect the same output regardless of the order in which adapters are
# applied.
        # We have to be very careful to reset the random seed each time it is used; otherwise, the adapters may be
        # initialized with different values and the test will fail.
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
# base model
base_model = self._get_model(model_cls)
output_base = base_model(input)
assert torch.isfinite(output_base).all()
# adapter 0
peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
output_config0 = peft_model_0(input)
assert torch.isfinite(output_config0).all()
assert not torch.allclose(output_base, output_config0, atol=atol, rtol=rtol)
# adapter 1
peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
output_config1 = peft_model_1(input)
assert torch.isfinite(output_config1).all()
assert not torch.allclose(output_base, output_config1, atol=atol, rtol=rtol)
assert not torch.allclose(output_config0, output_config1, atol=atol, rtol=rtol)
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
# check the number of tuner layer types
tuner_layers = [mod for mod in peft_model_01.modules() if isinstance(mod, BaseTunerLayer)]
tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers}
if type(config0) is type(config1):
assert len(tuner_types) == 1
else:
assert len(tuner_types) == 2
assert peft_model_01.active_adapters == ["adapter0", "adapter1"]
assert torch.isfinite(output_mixed_01).all()
assert not torch.allclose(output_config0, output_mixed_01, atol=atol, rtol=rtol)
assert not torch.allclose(output_config1, output_mixed_01, atol=atol, rtol=rtol)
if is_commutative:
delta0 = output_config0 - output_base
delta1 = output_config1 - output_base
delta_mixed_01 = output_mixed_01 - output_base
assert torch.allclose((delta0 + delta1), delta_mixed_01, atol=atol, rtol=rtol)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
# check the number of tuner layer types
tuner_layers = [mod for mod in peft_model_10.modules() if isinstance(mod, BaseTunerLayer)]
tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers}
if type(config0) is type(config1):
assert len(tuner_types) == 1
else:
assert len(tuner_types) == 2
assert peft_model_10.active_adapters == ["adapter1", "adapter0"]
assert torch.isfinite(output_mixed_10).all()
assert not torch.allclose(output_config0, output_mixed_10, atol=atol, rtol=rtol)
assert not torch.allclose(output_config1, output_mixed_10, atol=atol, rtol=rtol)
if is_commutative:
assert torch.allclose(output_mixed_01, output_mixed_10, atol=atol, rtol=rtol)
        # turn around the order of the adapters of the 1 + 0 mixed model; it should behave like the 0 + 1 mixed model
peft_model_10.set_adapter(["adapter0", "adapter1"])
output_mixed_reversed = peft_model_10(input)
# check the number of tuner layer types
tuner_layers = [mod for mod in peft_model_10.modules() if isinstance(mod, BaseTunerLayer)]
tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers}
if type(config0) is type(config1):
assert len(tuner_types) == 1
else:
assert len(tuner_types) == 2
assert peft_model_10.active_adapters == ["adapter0", "adapter1"]
assert torch.isfinite(output_mixed_reversed).all()
assert not torch.allclose(output_mixed_reversed, output_config0, atol=atol, rtol=rtol)
assert not torch.allclose(output_mixed_reversed, output_config1, atol=atol, rtol=rtol)
if is_commutative:
assert torch.allclose(output_mixed_reversed, output_mixed_01, atol=atol, rtol=rtol)
assert torch.allclose(output_mixed_reversed, output_mixed_10, atol=atol, rtol=rtol)
def _check_merging(self, model_cls, config0, config1, input):
# Ensure that when merging mixed adapters, the result is the same as when applying the adapters separately.
# Merging requires a bit higher tolerance for some adapters, which can also vary depending on CPU vs GPU.
atol = 1e-4
rtol = 1e-4
seed0 = 0
seed1 = 1
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
model_merged_01 = peft_model_01.merge_and_unload()
output_merged_01 = model_merged_01(input)
assert torch.allclose(output_mixed_01, output_merged_01, atol=atol, rtol=rtol)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
model_merged_10 = peft_model_10.merge_and_unload()
output_merged_10 = model_merged_10(input)
assert torch.allclose(output_mixed_10, output_merged_10, atol=atol, rtol=rtol)
def _check_unload(self, model_cls, config0, config1, input):
# Ensure that we can unload the base model without merging
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
base_model = self._get_model(model_cls)
output_base = base_model(input)
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed = peft_model_01(input)
# unload
model_unloaded = peft_model_01.unload()
output_unloaded = model_unloaded(input)
assert not torch.allclose(output_mixed, output_unloaded, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_unloaded, atol=atol, rtol=rtol)
def _check_disable(self, model_cls, config0, config1, input):
# Ensure that we can disable adapters
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
# base model
base_model = self._get_model(model_cls)
output_base = base_model(input)
# adapter 0
peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
output_config0 = peft_model_0(input)
with peft_model_0.disable_adapter():
output_disabled0 = peft_model_0(input)
assert not torch.allclose(output_base, output_config0, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled0, atol=atol, rtol=rtol)
# adapter 1
peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
output_config1 = peft_model_1(input)
with peft_model_1.disable_adapter():
output_disabled1 = peft_model_1(input)
assert not torch.allclose(output_base, output_config1, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled1, atol=atol, rtol=rtol)
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
with peft_model_01.disable_adapter():
output_disabled01 = peft_model_01(input)
assert not torch.allclose(output_base, output_mixed_01, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled01, atol=atol, rtol=rtol)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
with peft_model_10.disable_adapter():
output_disabled10 = peft_model_10(input)
assert not torch.allclose(output_base, output_mixed_10, atol=atol, rtol=rtol)
assert torch.allclose(output_base, output_disabled10, atol=atol, rtol=rtol)
def _check_loading(self, model_cls, config0, config1, input, *, is_commutative):
# Check that we can load two adapters into the same model
        # Note that we save the adapters using a normal PeftModel because PeftMixedModel doesn't support saving yet
atol = 1e-5
rtol = 1e-5
seed0 = 0
seed1 = 1
with tempfile.TemporaryDirectory() as tmp_dirname:
# SAVING
# adapter 0: note that we set mixed=False because mixed models don't support saving (yet)
peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0, mixed=False)
output_config0 = peft_model_0(input)
peft_model_0.save_pretrained(os.path.join(tmp_dirname, "adapter0"))
# adapter 1: note that we set mixed=False because mixed models don't support saving (yet)
peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1, mixed=False)
output_config1 = peft_model_1(input)
peft_model_1.save_pretrained(os.path.join(tmp_dirname, "adapter1"))
# adapter 0 + 1
peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0)
torch.manual_seed(seed1)
peft_model_01.add_adapter("adapter1", config1)
peft_model_01.set_adapter(["adapter0", "adapter1"])
output_mixed_01 = peft_model_01(input)
# adapter 1 + 0
peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1)
torch.manual_seed(seed0)
peft_model_10.add_adapter("adapter0", config0)
peft_model_10.set_adapter(["adapter1", "adapter0"])
output_mixed_10 = peft_model_10(input)
# LOADING
# adapter 0
base_model = self._get_model(model_cls)
# Notes:
# Path is tmp_dirname/adapter0/adapter0 because non-default adapters are saved in a subfolder.
            # As a sanity check, we should set a completely different seed here. That way, we ensure that the
            # weights are not just randomly initialized exactly to the same values as before.
torch.manual_seed(123456)
peft_model_loaded0 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0"
)
output_loaded0 = peft_model_loaded0(input)
assert torch.allclose(output_config0, output_loaded0, atol=atol, rtol=rtol)
# adapter 1
base_model = self._get_model(model_cls)
torch.manual_seed(654321) # setting a completely different seed here should not affect the result
peft_model_loaded1 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1"
)
output_loaded1 = peft_model_loaded1(input)
assert torch.allclose(output_config1, output_loaded1, atol=atol, rtol=rtol)
# adapter 0 + 1
base_model = self._get_model(model_cls)
torch.manual_seed(97531) # setting a completely different seed here should not affect the result
peft_model_loaded_01 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0"
)
peft_model_loaded_01.load_adapter(os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1")
# at this point, "adapter0" should still be active
assert peft_model_loaded_01.active_adapters == ["adapter0"]
output_loaded01_0 = peft_model_loaded_01(input)
assert torch.allclose(output_config0, output_loaded01_0, atol=atol, rtol=rtol)
# activate adapter1
peft_model_loaded_01.set_adapter(["adapter1"])
assert peft_model_loaded_01.active_adapters == ["adapter1"]
output_loaded01_1 = peft_model_loaded_01(input)
assert torch.allclose(output_config1, output_loaded01_1, atol=atol, rtol=rtol)
# activate both adapters
peft_model_loaded_01.set_adapter(["adapter0", "adapter1"])
output_loaded01 = peft_model_loaded_01(input)
assert torch.allclose(output_mixed_01, output_loaded01, atol=atol, rtol=rtol)
# adapter 1 + 0
base_model = self._get_model(model_cls)
torch.manual_seed(445566) # setting a completely different seed here should not affect the result
peft_model_loaded_10 = PeftMixedModel.from_pretrained(
base_model, os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1"
)
peft_model_loaded_10.load_adapter(os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0")
# at this point, "adapter1" should still be active
assert peft_model_loaded_10.active_adapters == ["adapter1"]
output_loaded10_1 = peft_model_loaded_10(input)
assert torch.allclose(output_config1, output_loaded10_1, atol=atol, rtol=rtol)
            # activate adapter0
peft_model_loaded_10.set_adapter(["adapter0"])
assert peft_model_loaded_10.active_adapters == ["adapter0"]
output_loaded10_0 = peft_model_loaded_10(input)
assert torch.allclose(output_config0, output_loaded10_0, atol=atol, rtol=rtol)
# activate both adapters
peft_model_loaded_10.set_adapter(["adapter1", "adapter0"])
output_loaded10 = peft_model_loaded_10(input)
assert torch.allclose(output_mixed_10, output_loaded10, atol=atol, rtol=rtol)
if is_commutative:
assert torch.allclose(output_loaded01, output_loaded10, atol=atol, rtol=rtol)
assert torch.allclose(output_loaded10, output_mixed_01, atol=atol, rtol=rtol)
@parameterized.expand(
itertools.combinations(
[
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
LoHaConfig(target_modules=["lin0"], init_weights=False),
LoKrConfig(target_modules=["lin0"], init_weights=False),
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False),
OFTConfig(target_modules=["lin0"], init_weights=False),
],
r=2,
),
name_func=_param_name_func,
)
def test_target_first_layer(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=False)
@parameterized.expand(
itertools.combinations(
[
LoraConfig(target_modules=["lin1"], init_lora_weights=False),
LoHaConfig(target_modules=["lin1"], init_weights=False),
LoKrConfig(target_modules=["lin1"], init_weights=False),
AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False),
OFTConfig(target_modules=["lin1"], init_weights=False),
],
r=2,
),
name_func=_param_name_func,
)
def test_target_last_layer(self, config0, config1):
        # We are targeting the last layer of the SimpleNet. Therefore, since the adapters only add their activations
        # to the output, the results should be commutative. This would *not* work if the adapters did something more
        # complex, or if we targeted an earlier layer, because the non-linearity would destroy the commutativity.
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
# OFT is not commutative, as it's not a linear operation on the inputs
is_commutative = not any(isinstance(config, OFTConfig) for config in [config0, config1])
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=is_commutative)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=is_commutative)
@parameterized.expand(
itertools.combinations(
[
LoraConfig(init_lora_weights=False),
LoHaConfig(init_weights=False),
LoKrConfig(init_weights=False),
AdaLoraConfig(init_lora_weights=False),
OFTConfig(init_weights=False),
],
r=2,
),
name_func=_param_name_func,
)
def test_target_different_layers(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
config0.target_modules = ["lin0"]
config1.target_modules = ["lin1"]
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config0, config1, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=False)
# same, but switch target_modules around
config0.target_modules = ["lin1"]
config1.target_modules = ["lin0"]
self._check_mixed_outputs(SimpleNet, config1, config0, input, is_commutative=False)
self._check_merging(SimpleNet, config1, config0, input)
self._check_unload(SimpleNet, config1, config0, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config1, config0, input, is_commutative=False)
@parameterized.expand(
[
(
LoraConfig(target_modules=["lin1"], init_lora_weights=False),
LoraConfig(target_modules=["lin1"], init_lora_weights=False),
),
(
LoHaConfig(target_modules=["lin1"], init_weights=False),
LoHaConfig(target_modules=["lin1"], init_weights=False),
),
(
LoKrConfig(target_modules=["lin1"], init_weights=False),
LoKrConfig(target_modules=["lin1"], init_weights=False),
),
(
AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False),
AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False),
),
(
OFTConfig(target_modules=["lin1"], init_weights=False),
OFTConfig(target_modules=["lin1"], init_weights=False),
),
],
name_func=_param_name_func,
)
def test_target_last_layer_same_type(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
# OFT is not commutative, as it's not a linear operation on the inputs
is_commutative = not any(isinstance(config, OFTConfig) for config in [config0, config1])
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=is_commutative)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
@parameterized.expand(
[
(
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
),
(
LoHaConfig(target_modules=["lin0"], init_weights=False),
LoHaConfig(target_modules=["lin0"], init_weights=False),
),
(
LoKrConfig(target_modules=["lin0"], init_weights=False),
LoKrConfig(target_modules=["lin0"], init_weights=False),
),
(
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False),
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False),
),
(
OFTConfig(target_modules=["lin0"], init_weights=False),
OFTConfig(target_modules=["lin0"], init_weights=False),
),
],
name_func=_param_name_func,
)
def test_target_first_layer_same_type(self, config0, config1):
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False)
self._check_merging(SimpleNet, config0, config1, input)
self._check_unload(SimpleNet, config0, config1, input)
self._check_disable(SimpleNet, config1, config0, input)
self._check_loading(SimpleNet, config0, config1, input, is_commutative=False)
def test_deeply_nested(self):
# a somewhat absurdly nested model using different adapter types
atol = 1e-5
rtol = 1e-5
torch.manual_seed(0)
model = SimpleNet().eval().to(self.torch_device)
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
output_base = model(input)
config0 = LoraConfig(r=4, lora_alpha=4, target_modules=["lin0", "lin1"], init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
config1 = LoHaConfig(r=4, alpha=4, target_modules=["lin0"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
config2 = AdaLoraConfig(r=4, lora_alpha=4, target_modules=["lin1"], init_lora_weights=False)
peft_model.add_adapter("adapter2", config2)
config3 = LoKrConfig(r=4, alpha=4, target_modules=["lin0", "lin1"], init_weights=False)
peft_model.add_adapter("adapter3", config3)
config4 = OFTConfig(r=8, target_modules=["lin0", "lin1"], init_weights=False)
peft_model.add_adapter("adapter4", config4)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3", "adapter4"])
output_mixed = peft_model(input)
assert torch.isfinite(output_base).all()
assert not torch.allclose(output_base, output_mixed, atol=atol, rtol=rtol)
# test disabling all adapters
with peft_model.disable_adapter():
output_disabled = peft_model(input)
assert torch.isfinite(output_disabled).all()
assert torch.allclose(output_base, output_disabled, atol=atol, rtol=rtol)
assert not torch.allclose(output_mixed, output_disabled, atol=atol, rtol=rtol)
# merge and unload all adapters
model_copy = copy.deepcopy(peft_model)
model = model_copy.merge_and_unload()
output_merged = model(input)
assert torch.isfinite(output_merged).all()
assert torch.allclose(output_mixed, output_merged, atol=atol, rtol=rtol)
# merge and unload only adapter1 and adapter3
model_copy = copy.deepcopy(peft_model)
model_copy.set_adapter(["adapter1", "adapter3"])
output_13 = model_copy(input)
assert torch.isfinite(output_13).all()
assert not torch.allclose(output_mixed, output_13, atol=atol, rtol=rtol)
model_copy.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3", "adapter4"])
model_merged_unloaded = model_copy.merge_and_unload(adapter_names=["adapter1", "adapter3"])
output_merged_13 = model_merged_unloaded(input)
assert torch.isfinite(output_merged_13).all()
assert torch.allclose(output_13, output_merged_13, atol=atol, rtol=rtol)
# test unloading
model_copy = copy.deepcopy(peft_model)
model_unloaded = model_copy.unload()
output_unloaded = model_unloaded(input)
assert torch.isfinite(output_unloaded).all()
assert torch.allclose(output_base, output_unloaded, atol=atol, rtol=rtol)
def test_delete_adapter(self):
atol = 1e-5
rtol = 1e-5
torch.manual_seed(0)
model = SimpleNet().eval().to(self.torch_device)
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
output_base = model(input)
# create adapter0
torch.manual_seed(0)
config0 = LoraConfig(r=4, lora_alpha=4, target_modules=["lin0", "lin1"], init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
output_0 = peft_model(input)
assert not torch.allclose(output_base, output_0, atol=atol, rtol=rtol)
# add adapter1
torch.manual_seed(1)
config1 = LoHaConfig(r=4, alpha=4, target_modules=["lin0"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
output_01 = peft_model(input)
assert not torch.allclose(output_base, output_01, atol=atol, rtol=rtol)
assert not torch.allclose(output_0, output_01, atol=atol, rtol=rtol)
# delete adapter1
peft_model.delete_adapter("adapter1")
assert peft_model.active_adapters == ["adapter0"]
output_deleted_1 = peft_model(input)
assert torch.allclose(output_0, output_deleted_1, atol=atol, rtol=rtol)
msg = re.escape("Adapter(s) ['adapter1'] not found, available adapters: ['adapter0']")
with pytest.raises(ValueError, match=msg):
peft_model.set_adapter(["adapter0", "adapter1"])
# re-add adapter1
torch.manual_seed(1)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
output_01_readded = peft_model(input)
assert not torch.allclose(output_base, output_01_readded, atol=atol, rtol=rtol)
# same as above, but this time delete adapter0 first
torch.manual_seed(0)
model = SimpleNet().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
torch.manual_seed(1)
peft_model.add_adapter("adapter1", config1)
peft_model.delete_adapter("adapter0")
assert peft_model.active_adapters == ["adapter1"]
output_deleted_0 = peft_model(input)
assert not torch.allclose(output_deleted_0, output_base, atol=atol, rtol=rtol)
assert not torch.allclose(output_deleted_0, output_01, atol=atol, rtol=rtol)
msg = re.escape("Adapter(s) ['adapter0'] not found, available adapters: ['adapter1']")
with pytest.raises(ValueError, match=msg):
peft_model.set_adapter(["adapter0", "adapter1"])
peft_model.delete_adapter("adapter1")
assert peft_model.active_adapters == []
output_deleted_01 = peft_model(input)
assert torch.allclose(output_deleted_01, output_base, atol=atol, rtol=rtol)
def test_modules_to_save(self):
model = SimpleNet().eval().to(self.torch_device)
config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
# adding a second adapter with same modules_to_save is not allowed
# TODO: theoretically, we could allow this if it's the same target layer
config1 = LoHaConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model.add_adapter("adapter1", config1)
with pytest.raises(ValueError, match="Only one adapter can be set at a time for modules_to_save"):
peft_model.set_adapter(["adapter0", "adapter1"])
def test_get_nb_trainable_parameters(self):
model = SimpleNet().eval().to(self.torch_device)
params_base = sum(p.numel() for p in model.parameters())
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
trainable_params0, all_param0 = peft_model.get_nb_trainable_parameters()
params_lora = sum(p.numel() for n, p in model.named_parameters() if "adapter0" in n)
assert trainable_params0 == params_lora
assert all_param0 == (params_base + params_lora)
config1 = LoHaConfig(target_modules=["lin1"])
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
params_loha = sum(p.numel() for n, p in model.named_parameters() if "adapter1" in n)
trainable_params1, all_param1 = peft_model.get_nb_trainable_parameters()
assert trainable_params1 == (params_lora + params_loha)
assert all_param1 == ((params_base + params_lora) + params_loha)
config2 = AdaLoraConfig(target_modules=["lin0", "lin1"])
peft_model.add_adapter("adapter2", config2)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2"])
params_adalora = sum(p.numel() for n, p in model.named_parameters() if "adapter2" in n)
trainable_params2, all_param2 = peft_model.get_nb_trainable_parameters()
# remove 2 params because we need to exclude "ranknum" for AdaLora trainable params
assert trainable_params2 == (((params_lora + params_loha) + params_adalora) - 2)
assert all_param2 == (((params_base + params_lora) + params_loha) + params_adalora)
def test_incompatible_config_raises(self):
model = SimpleNet().eval().to(self.torch_device)
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
config1 = PrefixTuningConfig()
msg = "The provided `peft_type` 'PREFIX_TUNING' is not compatible with the `PeftMixedModel`."
with pytest.raises(ValueError, match=msg):
peft_model.add_adapter("adapter1", config1)
def test_decoder_model(self):
# test a somewhat realistic model instead of a toy model
torch.manual_seed(0)
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
output_base = model.generate(**input_dict)
torch.manual_seed(0)
config0 = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)
output0 = peft_model.generate(**input_dict)
assert torch.isfinite(output0).all()
assert not torch.allclose(output_base, output0)
torch.manual_seed(1)
config1 = LoHaConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])
output1 = peft_model.generate(**input_dict)
assert torch.isfinite(output1).all()
assert not torch.allclose(output0, output1)
torch.manual_seed(2)
config2 = AdaLoraConfig(task_type="CAUSAL_LM", init_lora_weights=False)
peft_model.add_adapter("adapter2", config2)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2"])
output2 = peft_model.generate(**input_dict)
assert torch.isfinite(output2).all()
assert not torch.allclose(output1, output2)
torch.manual_seed(3)
config3 = LoKrConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False)
peft_model.add_adapter("adapter3", config3)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"])
output3 = peft_model.generate(**input_dict)
assert torch.isfinite(output3).all()
assert not torch.allclose(output2, output3)
torch.manual_seed(4)
config4 = OFTConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False)
peft_model.add_adapter("adapter4", config4)
peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3", "adapter4"])
output4 = peft_model.generate(**input_dict)
assert torch.isfinite(output4).all()
assert not torch.allclose(output3, output4)
with peft_model.disable_adapter():
output_disabled = peft_model.generate(**input_dict)
assert torch.isfinite(output_disabled).all()
assert torch.allclose(output_base, output_disabled)
model_unloaded = peft_model.merge_and_unload()
output_unloaded = model_unloaded.generate(**input_dict)
assert torch.isfinite(output_unloaded).all()
assert torch.allclose(output4, output_unloaded)
with tempfile.TemporaryDirectory() as tmp_dir:
# save adapter0 (use normal PeftModel, because PeftMixedModel does not support saving)
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
torch.manual_seed(0)
peft_model = get_peft_model(model, config0, "adapter0")
output0_save = peft_model(**input_dict).logits
assert torch.isfinite(output0_save).all()
peft_model.save_pretrained(tmp_dir)
# save adapter1
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
torch.manual_seed(1)
peft_model = get_peft_model(model, config1, "adapter1")
output1_save = peft_model(**input_dict).logits
assert torch.isfinite(output1_save).all()
peft_model.save_pretrained(tmp_dir)
# load adapter0 and adapter1
model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device)
peft_model = PeftMixedModel.from_pretrained(model, os.path.join(tmp_dir, "adapter0"), "adapter0")
peft_model.load_adapter(os.path.join(tmp_dir, "adapter1"), "adapter1")
peft_model.set_adapter(["adapter0", "adapter1"])
output01_loaded = peft_model(**input_dict).logits
atol, rtol = 1e-3, 1e-3
assert torch.isfinite(output01_loaded).all()
assert not torch.allclose(output0_save, output01_loaded, atol=atol, rtol=rtol)
assert not torch.allclose(output1_save, output01_loaded, atol=atol, rtol=rtol)
|
peft/tests/test_mixed.py/0
|
{
"file_path": "peft/tests/test_mixed.py",
"repo_id": "peft",
"token_count": 17543
}
| 201
|
# Feature Extraction
All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification.
## Penultimate Layer Features (Pre-Classifier Features)
The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide whether they want pooled or un-pooled features.
### Unpooled
There are three ways to obtain unpooled features. The final, unpooled features are sometimes referred to as the last hidden state. In `timm` this is up to and including the final normalization layer (in e.g. ViT style models) but does not include pooling / class token selection and final post-pooling layers.
Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the network's head classifier and global pooling.
If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network.
#### forward_features()
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('xception41', pretrained=True)
>>> o = m(torch.randn(2, 3, 299, 299))
>>> print(f'Original shape: {o.shape}')
>>> o = m.forward_features(torch.randn(2, 3, 299, 299))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Unpooled shape: torch.Size([2, 2048, 10, 10])
```
#### Create with no classifier and pooling
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Unpooled shape: torch.Size([2, 2048, 7, 7])
```
#### Remove it later
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('densenet121', pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Original shape: {o.shape}')
>>> m.reset_classifier(0, '')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Unpooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Unpooled shape: torch.Size([2, 1024, 7, 7])
```
#### Chaining unpooled output to classifier
The last hidden state can be fed back into the head of the model using the `forward_head()` function.
```py
>>> model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
>>> output = model.forward_features(torch.randn(2,3,256,256))
>>> print('Unpooled output shape:', output.shape)
>>> classified = model.forward_head(output)
>>> print('Classification output shape:', classified.shape)
```
Output:
```text
Unpooled output shape: torch.Size([2, 257, 512])
Classification output shape: torch.Size([2, 1000])
```
### Pooled
To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact.
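If pooling manually, a minimal sketch using global average pooling over the spatial dims (this assumes a convnet-style `NCHW` feature map; token-based models would pool differently):
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True)
>>> feats = m.forward_features(torch.randn(2, 3, 224, 224))
>>> pooled = feats.mean(dim=(2, 3)) # global average pool over H, W
>>> print(f'Pooled shape: {pooled.shape}')
```
Output:
```text
Pooled shape: torch.Size([2, 2048])
```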
#### Create with no classifier
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, num_classes=0)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Pooled shape: {o.shape}')
```
Output:
```text
Pooled shape: torch.Size([2, 2048])
```
#### Remove it later
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('ese_vovnet19b_dw', pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Original shape: {o.shape}')
>>> m.reset_classifier(0)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> print(f'Pooled shape: {o.shape}')
```
Output:
```text
Original shape: torch.Size([2, 1000])
Pooled shape: torch.Size([2, 1024])
```
## Multi-scale Feature Maps (Feature Pyramid)
Object detection, segmentation, keypoint estimation, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given object detection or segmentation library.
`timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels.
A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default most models with a feature hierarchy will output up to 5 features, up to a reduction (stride) of 32. However, this varies per model: some models have fewer hierarchy levels, and some (like ViT) have a larger number of non-hierarchical feature maps and default to outputting the last 3. The `out_indices` arg can be passed to `create_model` to specify which features you want.
### Create a feature map extraction model
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnest26d', features_only=True, pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
torch.Size([2, 64, 112, 112])
torch.Size([2, 256, 56, 56])
torch.Size([2, 512, 28, 28])
torch.Size([2, 1024, 14, 14])
torch.Size([2, 2048, 7, 7])
```
### Query the feature information
After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is an object encapsulating information about the feature extraction points.
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('regnety_032', features_only=True, pretrained=True)
>>> print(f'Feature channels: {m.feature_info.channels()}')
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
Feature channels: [32, 72, 216, 576, 1512]
torch.Size([2, 32, 112, 112])
torch.Size([2, 72, 56, 56])
torch.Size([2, 216, 28, 28])
torch.Size([2, 576, 14, 14])
torch.Size([2, 1512, 7, 7])
```
### Select specific feature levels or limit the stride
There are two additional creation arguments impacting the output features.
* `out_indices` selects which indices to output
* `output_stride` limits the feature output stride of the network (this also works in classification mode, as shown in the sketch below)
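As a quick illustration, a sketch of `output_stride` in classification mode (assuming `resnet50`, whose stages support dilation; the classifier head stays intact while the feature resolution increases):
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', pretrained=True, output_stride=8)
>>> o = m.forward_features(torch.randn(2, 3, 224, 224))
>>> print(o.shape)
```
Output:
```text
torch.Size([2, 2048, 28, 28])
```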
#### Output index selection
The `out_indices` argument is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check `feature_info` to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most convnet models, index 0 is the stride 2 features, and index 4 is stride 32. For many ViT or ViT-Conv hybrids there may be many (or all) feature maps of the same shape, or a combination of hierarchical and non-hierarchical feature maps. It is best to look at the `feature_info` attribute to see the number of features, their corresponding channel counts, and reduction levels.
`out_indices` supports negative indexing, which makes it easy to get the last, penultimate, etc. feature map. `out_indices=(-2,)` would return the penultimate feature map for any model.
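For example, a sketch using negative indices (the shapes below assume the 5-level `resnet50` hierarchy):
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('resnet50', features_only=True, out_indices=(-2, -1), pretrained=True)
>>> o = m(torch.randn(2, 3, 224, 224))
>>> for x in o:
... print(x.shape)
```
Output:
```text
torch.Size([2, 1024, 14, 14])
torch.Size([2, 2048, 7, 7])
```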
#### Output stride (feature map dilation)
`output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward; some networks only support `output_stride=32`.
```py
>>> import torch
>>> import timm
>>> m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True)
>>> print(f'Feature channels: {m.feature_info.channels()}')
>>> print(f'Feature reduction: {m.feature_info.reduction()}')
>>> o = m(torch.randn(2, 3, 320, 320))
>>> for x in o:
... print(x.shape)
```
Output:
```text
Feature channels: [512, 2048]
Feature reduction: [8, 8]
torch.Size([2, 512, 40, 40])
torch.Size([2, 2048, 40, 40])
```
## Flexible intermediate feature map extraction
In addition to using `features_only` with the model factory, many models support a `forward_intermediates()` method which provides a flexible mechanism for extracting both the intermediate feature maps and the last hidden state (which can be chained to the head). Additionally, this method supports model-specific features such as returning class or distill prefix tokens.
Accompanying the `forward_intermediates` function is a `prune_intermediate_layers` function that allows one to prune layers from the model, including the head, the final norm, and/or trailing blocks/stages that are not needed.
An `indices` argument is used for both `forward_intermediates()` and `prune_intermediate_layers()` to select the features to return or layers to remove. As with `out_indices` in the `features_only` API, `indices` is model specific and selects which intermediates are returned.
In non-hierarchical block based models such as ViT, the indices correspond to the blocks; in models with hierarchical stages they usually correspond to the output of the stem plus each hierarchical stage. Both positive (from the start) and negative (relative to the end) indexing work, and `None` is used to return all intermediates.
The `prune_intermediate_layers()` call returns an indices variable, as negative indices must be converted to absolute (positive) indices when the model is trimmed.
```py
model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
output, intermediates = model.forward_intermediates(torch.randn(2,3,256,256))
for i, o in enumerate(intermediates):
print(f'Feat index: {i}, shape: {o.shape}')
```
```text
Feat index: 0, shape: torch.Size([2, 512, 16, 16])
Feat index: 1, shape: torch.Size([2, 512, 16, 16])
Feat index: 2, shape: torch.Size([2, 512, 16, 16])
Feat index: 3, shape: torch.Size([2, 512, 16, 16])
Feat index: 4, shape: torch.Size([2, 512, 16, 16])
Feat index: 5, shape: torch.Size([2, 512, 16, 16])
Feat index: 6, shape: torch.Size([2, 512, 16, 16])
Feat index: 7, shape: torch.Size([2, 512, 16, 16])
Feat index: 8, shape: torch.Size([2, 512, 16, 16])
Feat index: 9, shape: torch.Size([2, 512, 16, 16])
Feat index: 10, shape: torch.Size([2, 512, 16, 16])
Feat index: 11, shape: torch.Size([2, 512, 16, 16])
```
```py
model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True)
print('Original params:', sum([p.numel() for p in model.parameters()]))
indices = model.prune_intermediate_layers(indices=(-2,), prune_head=True, prune_norm=True) # prune head, norm, last block
print('Pruned params:', sum([p.numel() for p in model.parameters()]))
intermediates = model.forward_intermediates(torch.randn(2,3,256,256), indices=indices, intermediates_only=True) # return penultimate intermediate
for o in intermediates:
print(f'Feat shape: {o.shape}')
```
```text
Original params: 38880232
Pruned params: 35212800
Feat shape: torch.Size([2, 512, 16, 16])
```
|
pytorch-image-models/hfdocs/source/feature_extraction.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/feature_extraction.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3391
}
| 202
|
# EfficientNet
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice, which scales these factors arbitrarily, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
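To make the scaling rule concrete, here is an illustrative sketch (not timm code) using the grid-search coefficients the paper reports for the base network, \\( \alpha=1.2, \beta=1.1, \gamma=1.15 \\):
```py
# Illustrative only: compute compound-scaled depth/width multipliers and resolution.
alpha, beta, gamma = 1.2, 1.1, 1.15  # paper's grid-search coefficients for B0

def compound_scale(phi, base_res=224):
    depth_mult = alpha ** phi  # multiply the layer count by this
    width_mult = beta ** phi  # multiply the channel count by this
    res = round(base_res * gamma ** phi)  # scaled input resolution
    return depth_mult, width_mult, res

print(compound_scale(1))  # (1.2, 1.1, 258)
```
Note that the released variants use hand-tuned resolutions rather than exact \\( \gamma^\phi \\) rounding (e.g. `efficientnet_b1` uses a 240px input rather than 258).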
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('efficientnet_b0', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `efficientnet_b0`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2020efficientnet,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: EfficientNet
Paper:
Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
Models:
- Name: efficientnet_b0
In Collection: EfficientNet
Metadata:
FLOPs: 511241564
Parameters: 5290000
File Size: 21376743
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b0
Layers: 18
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1002
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.71%
Top 5 Accuracy: 93.52%
- Name: efficientnet_b1
In Collection: EfficientNet
Metadata:
FLOPs: 909691920
Parameters: 7790000
File Size: 31502706
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b1
Crop Pct: '0.875'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1011
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.71%
Top 5 Accuracy: 94.15%
- Name: efficientnet_b2
In Collection: EfficientNet
Metadata:
FLOPs: 1265324514
Parameters: 9110000
File Size: 36788104
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2
Crop Pct: '0.875'
Image Size: '260'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1020
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.38%
Top 5 Accuracy: 95.08%
- Name: efficientnet_b2a
In Collection: EfficientNet
Metadata:
FLOPs: 1452041554
Parameters: 9110000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2a
Crop Pct: '1.0'
Image Size: '288'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1029
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.61%
Top 5 Accuracy: 95.32%
- Name: efficientnet_b3
In Collection: EfficientNet
Metadata:
FLOPs: 2327905920
Parameters: 12230000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1038
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.08%
Top 5 Accuracy: 96.03%
- Name: efficientnet_b3a
In Collection: EfficientNet
Metadata:
FLOPs: 2600628304
Parameters: 12230000
File Size: 49369973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3a
Crop Pct: '1.0'
Image Size: '320'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1047
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.25%
Top 5 Accuracy: 96.11%
- Name: efficientnet_em
In Collection: EfficientNet
Metadata:
FLOPs: 3935516480
Parameters: 6900000
File Size: 27927309
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_em
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1118
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.26%
Top 5 Accuracy: 94.79%
- Name: efficientnet_es
In Collection: EfficientNet
Metadata:
FLOPs: 2317181824
Parameters: 5440000
File Size: 22003339
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_es
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1110
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.09%
Top 5 Accuracy: 93.93%
- Name: efficientnet_lite0
In Collection: EfficientNet
Metadata:
FLOPs: 510605024
Parameters: 4650000
File Size: 18820005
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_lite0
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1163
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.5%
Top 5 Accuracy: 92.51%
-->
|
pytorch-image-models/hfdocs/source/models/efficientnet.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/efficientnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4915
}
| 203
|
# (Tensorflow) MobileNet v3
**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
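For reference, a minimal sketch of the hard swish activation mentioned above (illustrative, not the timm implementation):
```py
import torch
import torch.nn.functional as F

def hard_swish(x: torch.Tensor) -> torch.Tensor:
    # hard_swish(x) = x * relu6(x + 3) / 6: zero below -3, ~identity above +3
    return x * F.relu6(x + 3.0) / 6.0

print(hard_swish(torch.tensor([-4.0, 0.0, 4.0])))  # -> values 0., 0., 4.
```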
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-02244,
author = {Andrew Howard and
Mark Sandler and
Grace Chu and
Liang{-}Chieh Chen and
Bo Chen and
Mingxing Tan and
Weijun Wang and
Yukun Zhu and
Ruoming Pang and
Vijay Vasudevan and
Quoc V. Le and
Hartwig Adam},
title = {Searching for MobileNetV3},
journal = {CoRR},
volume = {abs/1905.02244},
year = {2019},
url = {http://arxiv.org/abs/1905.02244},
archivePrefix = {arXiv},
eprint = {1905.02244},
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF MobileNet V3
Paper:
Title: Searching for MobileNetV3
URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
Models:
- Name: tf_mobilenetv3_large_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 194323712
Parameters: 3990000
File Size: 16097377
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_075
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.45%
Top 5 Accuracy: 91.34%
- Name: tf_mobilenetv3_large_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 274535288
Parameters: 5480000
File Size: 22076649
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.51%
Top 5 Accuracy: 92.61%
- Name: tf_mobilenetv3_large_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 267216928
Parameters: 3920000
File Size: 15836368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_minimal_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.24%
Top 5 Accuracy: 90.64%
- Name: tf_mobilenetv3_small_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 48457664
Parameters: 2040000
File Size: 8242701
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_075
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.72%
Top 5 Accuracy: 86.13%
- Name: tf_mobilenetv3_small_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 65450600
Parameters: 2540000
File Size: 10256398
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.92%
Top 5 Accuracy: 87.68%
- Name: tf_mobilenetv3_small_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 60827936
Parameters: 2040000
File Size: 8258083
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_minimal_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 62.91%
Top 5 Accuracy: 84.24%
-->
|
pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4781
}
| 204
|
from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d
import timm
from timm.utils.model import freeze, unfreeze
def test_freeze_unfreeze():
model = timm.create_model('resnet18')
# Freeze all
freeze(model)
# Check top level module
assert model.fc.weight.requires_grad == False
# Check submodule
assert model.layer1[0].conv1.weight.requires_grad == False
# Check BN
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
# Unfreeze all
unfreeze(model)
# Check top level module
assert model.fc.weight.requires_grad == True
# Check submodule
assert model.layer1[0].conv1.weight.requires_grad == True
# Check BN
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
# Freeze some
freeze(model, ['layer1', 'layer2.0'])
# Check frozen
assert model.layer1[0].conv1.weight.requires_grad == False
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
assert model.layer2[0].conv1.weight.requires_grad == False
# Check not frozen
assert model.layer3[0].conv1.weight.requires_grad == True
assert isinstance(model.layer3[0].bn1, BatchNorm2d)
assert model.layer2[1].conv1.weight.requires_grad == True
# Unfreeze some
unfreeze(model, ['layer1', 'layer2.0'])
# Check not frozen
assert model.layer1[0].conv1.weight.requires_grad == True
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
assert model.layer2[0].conv1.weight.requires_grad == True
# Freeze/unfreeze BN
# From root
freeze(model, ['layer1.0.bn1'])
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
unfreeze(model, ['layer1.0.bn1'])
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
# From direct parent
freeze(model.layer1[0], ['bn1'])
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
unfreeze(model.layer1[0], ['bn1'])
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
|
pytorch-image-models/tests/test_utils.py/0
|
{
"file_path": "pytorch-image-models/tests/test_utils.py",
"repo_id": "pytorch-image-models",
"token_count": 776
}
| 205
|
""" Random Erasing (Cutout)
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2019, Ross Wightman
"""
import random
import math
import torch
def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
class RandomErasing:
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
This variant of RandomErasing is intended to be applied to either a batch
or single image tensor after it has been normalized by dataset mean and std.
Args:
probability: Probability that the Random Erasing operation will be performed.
min_area: Minimum percentage of erased area wrt input image area.
max_area: Maximum percentage of erased area wrt input image area.
min_aspect: Minimum aspect ratio of erased area.
mode: pixel color mode, one of 'const', 'rand', or 'pixel'
'const' - erase block is constant color of 0 for all channels
'rand' - erase block is same per-channel random (normal) color
'pixel' - erase block is per-pixel random (normal) color
min_count: minimum number of erasing blocks per image.
max_count: maximum number of erasing blocks per image; area per box is scaled by count.
per-image count is randomly chosen between min_count and this value.
num_splits: if > 1, the first batch_size // num_splits samples of each batch are left
un-erased (kept as a clean portion of the batch).
device: device on which the random fill values are created.
"""
def __init__(
self,
probability=0.5,
min_area=0.02,
max_area=1/3,
min_aspect=0.3,
max_aspect=None,
mode='const',
min_count=1,
max_count=None,
num_splits=0,
device='cuda',
):
self.probability = probability
self.min_area = min_area
self.max_area = max_area
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
self.min_count = min_count
self.max_count = max_count or min_count
self.num_splits = num_splits
self.mode = mode.lower()
self.rand_color = False
self.per_pixel = False
if self.mode == 'rand':
self.rand_color = True # per block random normal
elif self.mode == 'pixel':
self.per_pixel = True # per pixel random normal
else:
assert not self.mode or self.mode == 'const'
self.device = device
def _erase(self, img, chan, img_h, img_w, dtype):
if random.random() > self.probability:
return
area = img_h * img_w
count = self.min_count if self.min_count == self.max_count else \
random.randint(self.min_count, self.max_count)
for _ in range(count):
for attempt in range(10):
target_area = random.uniform(self.min_area, self.max_area) * area / count
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
img[:, top:top + h, left:left + w] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def __call__(self, input):
if len(input.size()) == 3:
self._erase(input, *input.size(), input.dtype)
else:
batch_size, chan, img_h, img_w = input.size()
# skip first slice of batch if num_splits is set (for clean portion of samples)
batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0
for i in range(batch_start, batch_size):
self._erase(input[i], chan, img_h, img_w, input.dtype)
return input
def __repr__(self):
# NOTE simplified state for repr
fs = self.__class__.__name__ + f'(p={self.probability}, mode={self.mode}'
fs += f', count=({self.min_count}, {self.max_count}))'
return fs
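# Example usage (illustrative sketch, not part of the original module):
# erase = RandomErasing(probability=1.0, mode='pixel', device='cpu')
# batch = torch.randn(8, 3, 224, 224) # an already-normalized batch
# batch = erase(batch) # random rectangles erased in-place, per image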
|
pytorch-image-models/timm/data/random_erasing.py/0
|
{
"file_path": "pytorch-image-models/timm/data/random_erasing.py",
"repo_id": "pytorch-image-models",
"token_count": 2258
}
| 206
|
import math
import numbers
import random
import warnings
from typing import List, Sequence, Tuple, Union
import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
try:
from torchvision.transforms.functional import InterpolationMode
has_interpolation_mode = True
except ImportError:
has_interpolation_mode = False
from PIL import Image
import numpy as np
__all__ = [
"ToNumpy", "ToTensor", "str_to_interp_mode", "str_to_pil_interp", "interp_mode_to_str",
"RandomResizedCropAndInterpolation", "CenterCropOrPad", "center_crop_or_pad", "crop_or_pad",
"RandomCropOrPad", "RandomPad", "ResizeKeepRatio", "TrimBorder", "MaybeToTensor", "MaybePILToTensor"
]
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
class ToTensor:
""" ToTensor with no rescaling of values"""
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img):
return F.pil_to_tensor(pil_img).to(dtype=self.dtype)
class MaybeToTensor(transforms.ToTensor):
"""Convert a PIL Image or ndarray to tensor if it's not already one.
"""
def __init__(self) -> None:
super().__init__()
def __call__(self, pic) -> torch.Tensor:
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if isinstance(pic, torch.Tensor):
return pic
return F.to_tensor(pic)
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class MaybePILToTensor:
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
"""
def __init__(self) -> None:
super().__init__()
def __call__(self, pic):
"""
Note: A deep copy of the underlying array is performed.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if isinstance(pic, torch.Tensor):
return pic
return F.pil_to_tensor(pic)
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
# Pillow is deprecating the top-level resampling attributes (e.g., Image.BILINEAR) in
# favor of the Image.Resampling enum. The top-level resampling attributes will be
# removed in Pillow 10.
if hasattr(Image, "Resampling"):
_pil_interpolation_to_str = {
Image.Resampling.NEAREST: 'nearest',
Image.Resampling.BILINEAR: 'bilinear',
Image.Resampling.BICUBIC: 'bicubic',
Image.Resampling.BOX: 'box',
Image.Resampling.HAMMING: 'hamming',
Image.Resampling.LANCZOS: 'lanczos',
}
else:
_pil_interpolation_to_str = {
Image.NEAREST: 'nearest',
Image.BILINEAR: 'bilinear',
Image.BICUBIC: 'bicubic',
Image.BOX: 'box',
Image.HAMMING: 'hamming',
Image.LANCZOS: 'lanczos',
}
_str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()}
if has_interpolation_mode:
_torch_interpolation_to_str = {
InterpolationMode.NEAREST: 'nearest',
InterpolationMode.BILINEAR: 'bilinear',
InterpolationMode.BICUBIC: 'bicubic',
InterpolationMode.BOX: 'box',
InterpolationMode.HAMMING: 'hamming',
InterpolationMode.LANCZOS: 'lanczos',
}
_str_to_torch_interpolation = {b: a for a, b in _torch_interpolation_to_str.items()}
else:
_pil_interpolation_to_torch = {}
_torch_interpolation_to_str = {}
def str_to_pil_interp(mode_str):
return _str_to_pil_interpolation[mode_str]
def str_to_interp_mode(mode_str):
if has_interpolation_mode:
return _str_to_torch_interpolation[mode_str]
else:
return _str_to_pil_interpolation[mode_str]
def interp_mode_to_str(mode):
if has_interpolation_mode:
return _torch_interpolation_to_str[mode]
else:
return _pil_interpolation_to_str[mode]
_RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic'))
def _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."):
if isinstance(size, numbers.Number):
return int(size), int(size)
if isinstance(size, Sequence) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: 0.08 to 1.0 of the original area) and of a random
aspect ratio (default: 3/4 to 4/3 of the original aspect ratio) is made. This crop
is finally resized to the given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
interpolation='bilinear',
):
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = str_to_interp_mode(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
img_w, img_h = F.get_image_size(img)
area = img_w * img_h
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
target_w = int(round(math.sqrt(target_area * aspect_ratio)))
target_h = int(round(math.sqrt(target_area / aspect_ratio)))
if target_w <= img_w and target_h <= img_h:
i = random.randint(0, img_h - target_h)
j = random.randint(0, img_w - target_w)
return i, j, target_h, target_w
# Fallback to central crop
in_ratio = img_w / img_h
if in_ratio < min(ratio):
target_w = img_w
target_h = int(round(target_w / min(ratio)))
elif in_ratio > max(ratio):
target_h = img_h
target_w = int(round(target_h * max(ratio)))
else: # whole image
target_w = img_w
target_h = img_h
i = (img_h - target_h) // 2
j = (img_w - target_w) // 2
return i, j, target_h, target_w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation])
else:
interpolate_str = interp_mode_to_str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
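# Hedged usage sketch (editor's addition, not part of the original timm file):
# exercises the transform above on a blank PIL image; `_demo_random_resized_crop`
# is a hypothetical helper name used only for illustration.
def _demo_random_resized_crop():
    from PIL import Image
    tfm = RandomResizedCropAndInterpolation(size=224, interpolation='random')
    out = tfm(Image.new('RGB', (320, 240)))
    assert out.size == (224, 224)  # PIL reports (width, height)
    print(tfm)  # repr lists size, scale, ratio, and the candidate interpolations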
def center_crop_or_pad(
img: torch.Tensor,
output_size: Union[int, List[int]],
fill: Union[int, Tuple[int, int, int]] = 0,
padding_mode: str = 'constant',
) -> torch.Tensor:
"""Center crops and/or pads the given image.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
it is used for both directions.
fill (int, Tuple[int]): Padding color
Returns:
PIL Image or Tensor: Cropped image.
"""
output_size = _setup_size(output_size)
crop_height, crop_width = output_size
_, image_height, image_width = F.get_dimensions(img)
if crop_width > image_width or crop_height > image_height:
padding_ltrb = [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
img = F.pad(img, padding_ltrb, fill=fill, padding_mode=padding_mode)
_, image_height, image_width = F.get_dimensions(img)
if crop_width == image_width and crop_height == image_height:
return img
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return F.crop(img, crop_top, crop_left, crop_height, crop_width)
class CenterCropOrPad(torch.nn.Module):
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
"""
def __init__(
self,
size: Union[int, List[int]],
fill: Union[int, Tuple[int, int, int]] = 0,
padding_mode: str = 'constant',
):
super().__init__()
self.size = _setup_size(size)
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
return center_crop_or_pad(img, self.size, fill=self.fill, padding_mode=self.padding_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
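# Hedged sketch (editor's addition): demonstrates the pad-then-crop behaviour of
# CenterCropOrPad on a tensor that is too short in height and too wide in width.
def _demo_center_crop_or_pad():
    import torch
    y = CenterCropOrPad(224)(torch.randn(3, 200, 300))
    assert y.shape == (3, 224, 224)  # height padded to 224, width center-cropped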
def crop_or_pad(
img: torch.Tensor,
top: int,
left: int,
height: int,
width: int,
fill: Union[int, Tuple[int, int, int]] = 0,
padding_mode: str = 'constant',
) -> torch.Tensor:
""" Crops and/or pads image to meet target size, with control over fill and padding_mode.
"""
_, image_height, image_width = F.get_dimensions(img)
right = left + width
bottom = top + height
if left < 0 or top < 0 or right > image_width or bottom > image_height:
padding_ltrb = [
max(-left + min(0, right), 0),
max(-top + min(0, bottom), 0),
max(right - max(image_width, left), 0),
max(bottom - max(image_height, top), 0),
]
img = F.pad(img, padding_ltrb, fill=fill, padding_mode=padding_mode)
top = max(top, 0)
left = max(left, 0)
return F.crop(img, top, left, height, width)
class RandomCropOrPad(torch.nn.Module):
""" Crop and/or pad image with random placement within the crop or pad margin.
"""
def __init__(
self,
size: Union[int, List[int]],
fill: Union[int, Tuple[int, int, int]] = 0,
padding_mode: str = 'constant',
):
super().__init__()
self.size = _setup_size(size)
self.fill = fill
self.padding_mode = padding_mode
@staticmethod
def get_params(img, size):
_, image_height, image_width = F.get_dimensions(img)
delta_height = image_height - size[0]
delta_width = image_width - size[1]
top = int(math.copysign(random.randint(0, abs(delta_height)), delta_height))
left = int(math.copysign(random.randint(0, abs(delta_width)), delta_width))
return top, left
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
top, left = self.get_params(img, self.size)
return crop_or_pad(
img,
top=top,
left=left,
height=self.size[0],
width=self.size[1],
fill=self.fill,
padding_mode=self.padding_mode,
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
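# Hedged sketch (editor's addition): same input as the CenterCropOrPad example
# above, but the crop/pad placement is sampled uniformly within the margin.
def _demo_random_crop_or_pad():
    import torch
    y = RandomCropOrPad(224)(torch.randn(3, 200, 300))
    assert y.shape == (3, 224, 224)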
class RandomPad:
def __init__(self, input_size, fill=0):
self.input_size = input_size
self.fill = fill
@staticmethod
def get_params(img, input_size):
width, height = F.get_image_size(img)
delta_width = max(input_size[1] - width, 0)
delta_height = max(input_size[0] - height, 0)
pad_left = random.randint(0, delta_width)
pad_top = random.randint(0, delta_height)
pad_right = delta_width - pad_left
pad_bottom = delta_height - pad_top
return pad_left, pad_top, pad_right, pad_bottom
def __call__(self, img):
padding = self.get_params(img, self.input_size)
img = F.pad(img, padding, self.fill)
return img
class ResizeKeepRatio:
""" Resize and Keep Aspect Ratio
"""
def __init__(
self,
size,
longest=0.,
interpolation='bilinear',
random_scale_prob=0.,
random_scale_range=(0.85, 1.05),
random_scale_area=False,
random_aspect_prob=0.,
random_aspect_range=(0.9, 1.11),
):
"""
        Args:
            size: target output size, int or (h, w)
            longest: interpolation weight in [0, 1] between fitting the shortest edge (0.) and the longest edge (1.)
            interpolation: interpolation mode name, or 'random' to choose bilinear / bicubic per call
            random_scale_prob: probability of applying random scale jitter
            random_scale_range: (min, max) range of the scale jitter factor
            random_scale_area: if True, treat the scale factor as an area factor (as in an RRC area crop)
            random_aspect_prob: probability of applying random aspect-ratio jitter
            random_aspect_range: (min, max) range of the aspect-ratio jitter factor
"""
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = str_to_interp_mode(interpolation)
self.longest = float(longest)
self.random_scale_prob = random_scale_prob
self.random_scale_range = random_scale_range
self.random_scale_area = random_scale_area
self.random_aspect_prob = random_aspect_prob
self.random_aspect_range = random_aspect_range
@staticmethod
def get_params(
img,
target_size,
longest,
random_scale_prob=0.,
random_scale_range=(1.0, 1.33),
random_scale_area=False,
random_aspect_prob=0.,
random_aspect_range=(0.9, 1.11)
):
"""Get parameters
"""
img_h, img_w = img_size = F.get_dimensions(img)[1:]
target_h, target_w = target_size
ratio_h = img_h / target_h
ratio_w = img_w / target_w
ratio = max(ratio_h, ratio_w) * longest + min(ratio_h, ratio_w) * (1. - longest)
if random_scale_prob > 0 and random.random() < random_scale_prob:
ratio_factor = random.uniform(random_scale_range[0], random_scale_range[1])
if random_scale_area:
# make ratio factor equivalent to RRC area crop where < 1.0 = area zoom,
# otherwise like affine scale where < 1.0 = linear zoom out
ratio_factor = 1. / math.sqrt(ratio_factor)
ratio_factor = (ratio_factor, ratio_factor)
else:
ratio_factor = (1., 1.)
if random_aspect_prob > 0 and random.random() < random_aspect_prob:
log_aspect = (math.log(random_aspect_range[0]), math.log(random_aspect_range[1]))
aspect_factor = math.exp(random.uniform(*log_aspect))
aspect_factor = math.sqrt(aspect_factor)
# currently applying random aspect adjustment equally to both dims,
# could change to keep output sizes above their target where possible
ratio_factor = (ratio_factor[0] / aspect_factor, ratio_factor[1] * aspect_factor)
size = [round(x * f / ratio) for x, f in zip(img_size, ratio_factor)]
return size
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Resized, padded to at least target size, possibly cropped to exactly target size
"""
size = self.get_params(
img, self.size, self.longest,
self.random_scale_prob, self.random_scale_range, self.random_scale_area,
self.random_aspect_prob, self.random_aspect_range
)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
img = F.resize(img, size, interpolation)
return img
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation])
else:
interpolate_str = interp_mode_to_str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += f', interpolation={interpolate_str}'
format_string += f', longest={self.longest:.3f}'
format_string += f', random_scale_prob={self.random_scale_prob:.3f}'
        format_string += f', random_scale_range=(' \
                         f'{self.random_scale_range[0]:.3f}, {self.random_scale_range[1]:.3f})'
format_string += f', random_aspect_prob={self.random_aspect_prob:.3f}'
format_string += f', random_aspect_range=(' \
f'{self.random_aspect_range[0]:.3f}, {self.random_aspect_range[1]:.3f}))'
return format_string
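# Hedged sketch (editor's addition): illustrates the `longest` semantics of
# ResizeKeepRatio; with longest=1. the longest edge lands on the target size,
# with longest=0. the shortest edge does (the other edge overshoots).
def _demo_resize_keep_ratio():
    from PIL import Image
    img = Image.new('RGB', (400, 200))
    assert max(ResizeKeepRatio(224, longest=1.)(img).size) == 224
    assert min(ResizeKeepRatio(224, longest=0.)(img).size) == 224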
class TrimBorder(torch.nn.Module):
def __init__(
self,
border_size: int,
):
super().__init__()
self.border_size = border_size
def forward(self, img):
w, h = F.get_image_size(img)
top = left = self.border_size
top = min(top, h)
        left = min(left, w)
height = max(0, h - 2 * self.border_size)
width = max(0, w - 2 * self.border_size)
return F.crop(img, top, left, height, width)
|
pytorch-image-models/timm/data/transforms.py/0
|
{
"file_path": "pytorch-image-models/timm/data/transforms.py",
"repo_id": "pytorch-image-models",
"token_count": 9216
}
| 207
|
""" Conv2d + BN + Act
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Any, Dict, Optional, Type
from torch import nn as nn
from .typing import LayerType, PadType
from .blur_pool import create_aa
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer
class ConvNormAct(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: PadType = '',
dilation: int = 1,
groups: int = 1,
bias: bool = False,
apply_norm: bool = True,
apply_act: bool = True,
norm_layer: LayerType = nn.BatchNorm2d,
act_layer: Optional[LayerType] = nn.ReLU,
aa_layer: Optional[LayerType] = None,
drop_layer: Optional[Type[nn.Module]] = None,
conv_kwargs: Optional[Dict[str, Any]] = None,
norm_kwargs: Optional[Dict[str, Any]] = None,
act_kwargs: Optional[Dict[str, Any]] = None,
):
super(ConvNormAct, self).__init__()
conv_kwargs = conv_kwargs or {}
norm_kwargs = norm_kwargs or {}
act_kwargs = act_kwargs or {}
use_aa = aa_layer is not None and stride > 1
self.conv = create_conv2d(
in_channels,
out_channels,
kernel_size,
stride=1 if use_aa else stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
**conv_kwargs,
)
if apply_norm:
# NOTE for backwards compatibility with models that use separate norm and act layer definitions
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
# NOTE for backwards (weight) compatibility, norm layer name remains `.bn`
if drop_layer:
norm_kwargs['drop_layer'] = drop_layer
self.bn = norm_act_layer(
out_channels,
apply_act=apply_act,
act_kwargs=act_kwargs,
**norm_kwargs,
)
else:
self.bn = nn.Sequential()
            if drop_layer:
                self.bn.add_module('drop', drop_layer())
self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa, noop=None)
@property
def in_channels(self):
return self.conv.in_channels
@property
def out_channels(self):
return self.conv.out_channels
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.aa is not None:
x = self.aa(x)
return x
ConvBnAct = ConvNormAct
ConvNormActAa = ConvNormAct # backwards compat, when they were separate
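# Hedged usage sketch (editor's addition, not part of the original file): with
# the default padding='' a 3x3 stride-2 ConvNormAct resolves to symmetric
# 'same' padding, halving the spatial dims. `_demo_conv_norm_act` is a
# hypothetical helper name.
def _demo_conv_norm_act():
    import torch
    m = ConvNormAct(16, 32, kernel_size=3, stride=2)
    y = m(torch.randn(2, 16, 32, 32))
    assert y.shape == (2, 32, 16, 16)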
|
pytorch-image-models/timm/layers/conv_bn_act.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/conv_bn_act.py",
"repo_id": "pytorch-image-models",
"token_count": 1426
}
| 208
|
""" Halo Self Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
@misc{2103.12731,
Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
Jonathon Shlens},
Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones},
Year = {2021},
}
Status:
This impl is a WIP; there is no official ref impl, and some details in the paper weren't clear to me.
The attention mechanism works but it's slow as implemented.
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import List
import torch
from torch import nn
import torch.nn.functional as F
from .helpers import make_divisible
from .weight_init import trunc_normal_
from .trace_utils import _assert
def rel_logits_1d(q, rel_k, permute_mask: List[int]):
""" Compute relative logits along one dimension
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
Args:
q: (batch, height, width, dim)
rel_k: (2 * window - 1, dim)
permute_mask: permute output dim according to this
"""
B, H, W, dim = q.shape
rel_size = rel_k.shape[0]
win_size = (rel_size + 1) // 2
x = (q @ rel_k.transpose(-1, -2))
x = x.reshape(-1, W, rel_size)
# pad to shift from relative to absolute indexing
x_pad = F.pad(x, [0, 1]).flatten(1)
x_pad = F.pad(x_pad, [0, rel_size - W])
# reshape and slice out the padded elements
x_pad = x_pad.reshape(-1, W + 1, rel_size)
x = x_pad[:, :W, win_size - 1:]
# reshape and tile
x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
return x.permute(permute_mask)
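# Hedged shape sketch (editor's addition): for an 8x8 query grid and a 12-wide
# window (rel_k has 2 * 12 - 1 rows), rel_logits_1d yields one logit per query
# position per window offset, tiled across the other spatial dim.
def _demo_rel_logits_1d():
    import torch
    q = torch.randn(2, 8, 8, 16)          # (batch, height, width, dim)
    rel_k = torch.randn(2 * 12 - 1, 16)   # win_size = 12
    out = rel_logits_1d(q, rel_k, permute_mask=[0, 1, 3, 2, 4])
    assert out.shape == (2, 8, 8, 12, 12)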
class PosEmbedRel(nn.Module):
""" Relative Position Embedding
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
"""
def __init__(self, block_size, win_size, dim_head, scale):
"""
Args:
block_size (int): block size
win_size (int): neighbourhood window size
dim_head (int): attention head dim
scale (float): scale factor (for init)
"""
super().__init__()
self.block_size = block_size
self.dim_head = dim_head
self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
def forward(self, q):
B, BB, HW, _ = q.shape
# relative logits in width dimension.
q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
# relative logits in height dimension.
q = q.transpose(1, 2)
rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
rel_logits = rel_logits_h + rel_logits_w
rel_logits = rel_logits.reshape(B, BB, HW, -1)
return rel_logits
class HaloAttn(nn.Module):
""" Halo Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
The internal dimensions of the attention module are controlled by the interaction of several arguments.
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set
* the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
* the query and key (qk) dimensions are determined by
* num_heads * dim_head if dim_head is not None
* num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
* as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used
Args:
dim (int): input dimension to the module
dim_out (int): output dimension of the module, same as dim if not set
feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
stride: output stride of the module, query downscaled if > 1 (default: 1).
num_heads: parallel attention heads (default: 8).
dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
block_size (int): size of blocks. (default: 8)
halo_size (int): size of halo overlap. (default: 3)
qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
qkv_bias (bool) : add bias to q, k, and v projections
avg_down (bool): use average pool downsample instead of strided query blocks
scale_pos_embed (bool): scale the position embedding as well as Q @ K
"""
def __init__(
self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3,
qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False):
super().__init__()
dim_out = dim_out or dim
assert dim_out % num_heads == 0
assert stride in (1, 2)
self.num_heads = num_heads
self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
self.dim_head_v = dim_out // self.num_heads
self.dim_out_qk = num_heads * self.dim_head_qk
self.dim_out_v = num_heads * self.dim_head_v
self.scale = self.dim_head_qk ** -0.5
self.scale_pos_embed = scale_pos_embed
self.block_size = self.block_size_ds = block_size
self.halo_size = halo_size
self.win_size = block_size + halo_size * 2 # neighbourhood window size
self.block_stride = 1
use_avg_pool = False
if stride > 1:
use_avg_pool = avg_down or block_size % stride != 0
self.block_stride = 1 if use_avg_pool else stride
self.block_size_ds = self.block_size // self.block_stride
# FIXME not clear if this stride behaviour is what the paper intended
# Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
# data in unfolded block form. I haven't wrapped my head around how that'd look.
self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias)
self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias)
self.pos_embed = PosEmbedRel(
block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale)
self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity()
self.reset_parameters()
def reset_parameters(self):
std = self.q.weight.shape[1] ** -0.5 # fan-in
trunc_normal_(self.q.weight, std=std)
trunc_normal_(self.kv.weight, std=std)
trunc_normal_(self.pos_embed.height_rel, std=self.scale)
trunc_normal_(self.pos_embed.width_rel, std=self.scale)
def forward(self, x):
B, C, H, W = x.shape
        _assert(H % self.block_size == 0, 'input height must be divisible by block_size')
        _assert(W % self.block_size == 0, 'input width must be divisible by block_size')
num_h_blocks = H // self.block_size
num_w_blocks = W // self.block_size
num_blocks = num_h_blocks * num_w_blocks
q = self.q(x)
# unfold
q = q.reshape(
-1, self.dim_head_qk,
num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4)
# B, num_heads * dim_head * block_size ** 2, num_blocks
q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3)
# B * num_heads, num_blocks, block_size ** 2, dim_head
kv = self.kv(x)
# Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not
# lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach.
# FIXME figure out how to switch impl between this and conv2d if XLA being used.
kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size])
kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape(
B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1)
k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1)
# B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v
if self.scale_pos_embed:
attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale
else:
attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q)
# B * num_heads, num_blocks, block_size ** 2, win_size ** 2
attn = attn.softmax(dim=-1)
out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks
# fold
out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks)
out = out.permute(0, 3, 1, 4, 2).contiguous().view(
B, self.dim_out_v, H // self.block_stride, W // self.block_stride)
# B, dim_out, H // block_stride, W // block_stride
out = self.pool(out)
return out
""" Three alternatives for overlapping windows.
`.unfold().unfold()` is the same speed as stride tricks, with similar clarity to F.unfold()
if is_xla:
# This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is
# EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment.
WW = self.win_size ** 2
pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size)
kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size)
elif self.stride_tricks:
kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous()
kv = kv.as_strided((
B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks),
stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size))
else:
kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size)
kv = kv.reshape(
B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3)
"""
|
pytorch-image-models/timm/layers/halo_attn.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/halo_attn.py",
"repo_id": "pytorch-image-models",
"token_count": 4601
}
| 209
|
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
class PatchDropout(nn.Module):
"""
https://arxiv.org/abs/2212.00794 and https://arxiv.org/pdf/2208.07220
"""
return_indices: torch.jit.Final[bool]
def __init__(
self,
prob: float = 0.5,
num_prefix_tokens: int = 1,
ordered: bool = False,
return_indices: bool = False,
):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.num_prefix_tokens = num_prefix_tokens # exclude CLS token (or other prefix tokens)
self.ordered = ordered
self.return_indices = return_indices
def forward(self, x) -> Union[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
if not self.training or self.prob == 0.:
if self.return_indices:
return x, None
return x
if self.num_prefix_tokens:
prefix_tokens, x = x[:, :self.num_prefix_tokens], x[:, self.num_prefix_tokens:]
else:
prefix_tokens = None
B = x.shape[0]
L = x.shape[1]
num_keep = max(1, int(L * (1. - self.prob)))
keep_indices = torch.argsort(torch.randn(B, L, device=x.device), dim=-1)[:, :num_keep]
if self.ordered:
# NOTE does not need to maintain patch order in typical transformer use,
# but possibly useful for debug / visualization
keep_indices = keep_indices.sort(dim=-1)[0]
x = x.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + x.shape[2:]))
if prefix_tokens is not None:
x = torch.cat((prefix_tokens, x), dim=1)
if self.return_indices:
return x, keep_indices
return x
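# Hedged usage sketch (editor's addition, not part of the original file): with
# prob=0.5 and one prefix (CLS) token, 196 patch tokens reduce to
# max(1, int(196 * 0.5)) = 98 kept patches; the CLS token is always re-attached.
def _demo_patch_dropout():
    import torch
    drop = PatchDropout(prob=0.5, num_prefix_tokens=1).train()
    y = drop(torch.randn(2, 1 + 196, 64))
    assert y.shape == (2, 1 + 98, 64)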
|
pytorch-image-models/timm/layers/patch_dropout.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/patch_dropout.py",
"repo_id": "pytorch-image-models",
"token_count": 858
}
| 210
|
import torch
import math
import warnings
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out
def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
applied while sampling the normal with mean/std applied, therefore a, b args
should be adjusted to match the range of mean, std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
return _trunc_normal_(tensor, mean, std, a, b)
def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
    and the result is subsequently scaled and shifted by the mean and std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
_trunc_normal_(tensor, 0, 1.0, a, b)
tensor.mul_(std).add_(mean)
return tensor
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == 'fan_in':
denom = fan_in
elif mode == 'fan_out':
denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
if distribution == "truncated_normal":
# constant is stddev of standard normal truncated to (-2, 2)
trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978)
elif distribution == "normal":
with torch.no_grad():
tensor.normal_(std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
with torch.no_grad():
tensor.uniform_(-bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
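# Hedged numeric sketch (editor's addition): for a (256, 128) weight fan_in is
# 128, so 'fan_in' scaling with a normal distribution targets std ~= sqrt(1/128).
def _demo_variance_scaling():
    import torch
    w = torch.empty(256, 128)
    variance_scaling_(w, scale=1.0, mode='fan_in', distribution='normal')
    assert abs(w.std().item() - (1 / 128) ** 0.5) < 0.01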
def init_weight_vit(
module: nn.Module,
name: str,
init_bias: float = 0.02,
head_bias: float = 0.,
classifier_name: str = 'head'
):
if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
if name.startswith(classifier_name):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.trunc_normal_(module.weight, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
nn.init.constant_(module.bias, init_bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weight_jax(
module: nn.Module,
name: str,
head_bias: float = 0.,
classifier_name: str = 'head',
):
if isinstance(module, nn.Linear):
if name.startswith(classifier_name):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
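# Hedged usage sketch (editor's addition, not part of the original file): the
# init helpers above are applied per-module with the module's qualified name;
# modules whose name starts with `classifier_name` get a zeroed head. The tiny
# model here is purely illustrative.
def _demo_init_weight_vit():
    model = nn.Sequential()
    model.add_module('stem', nn.Linear(8, 8))
    model.add_module('head', nn.Linear(8, 2))
    for name, module in model.named_modules():
        init_weight_vit(module, name)
    assert bool((model.head.weight == 0).all())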
|
pytorch-image-models/timm/layers/weight_init.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/weight_init.py",
"repo_id": "pytorch-image-models",
"token_count": 2579
}
| 211
|
import copy
from collections import deque, defaultdict
from dataclasses import dataclass, field, replace, asdict
from typing import Any, Deque, Dict, Tuple, Optional, Union
__all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg']
@dataclass
class PretrainedCfg:
"""
"""
# weight source locations
url: Optional[Union[str, Tuple[str, str]]] = None # remote URL
file: Optional[str] = None # local / shared filesystem path
state_dict: Optional[Dict[str, Any]] = None # in-memory state dict
hf_hub_id: Optional[str] = None # Hugging Face Hub model id ('organization/model')
hf_hub_filename: Optional[str] = None # Hugging Face Hub filename (overrides default)
source: Optional[str] = None # source of cfg / weight location used (url, file, hf-hub)
architecture: Optional[str] = None # architecture variant can be set when not implicit
tag: Optional[str] = None # pretrained tag of source
custom_load: bool = False # use custom model specific model.load_pretrained() (ie for npz files)
# input / data config
input_size: Tuple[int, int, int] = (3, 224, 224)
test_input_size: Optional[Tuple[int, int, int]] = None
min_input_size: Optional[Tuple[int, int, int]] = None
fixed_input_size: bool = False
interpolation: str = 'bicubic'
crop_pct: float = 0.875
test_crop_pct: Optional[float] = None
crop_mode: str = 'center'
mean: Tuple[float, ...] = (0.485, 0.456, 0.406)
std: Tuple[float, ...] = (0.229, 0.224, 0.225)
# head / classifier config and meta-data
num_classes: int = 1000
label_offset: Optional[int] = None
    label_names: Optional[Tuple[str, ...]] = None
label_descriptions: Optional[Dict[str, str]] = None
# model attributes that vary with above or required for pretrained adaptation
pool_size: Optional[Tuple[int, ...]] = None
test_pool_size: Optional[Tuple[int, ...]] = None
first_conv: Optional[str] = None
classifier: Optional[str] = None
license: Optional[str] = None
description: Optional[str] = None
origin_url: Optional[str] = None
paper_name: Optional[str] = None
    paper_ids: Optional[Union[str, Tuple[str, ...]]] = None
    notes: Optional[Tuple[str, ...]] = None
@property
def has_weights(self):
return self.url or self.file or self.hf_hub_id
def to_dict(self, remove_source=False, remove_null=True):
return filter_pretrained_cfg(
asdict(self),
remove_source=remove_source,
remove_null=remove_null
)
def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True):
filtered_cfg = {}
keep_null = {'pool_size', 'first_conv', 'classifier'} # always keep these keys, even if none
for k, v in cfg.items():
        if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_filename', 'source'}:
continue
if remove_null and v is None and k not in keep_null:
continue
filtered_cfg[k] = v
return filtered_cfg
@dataclass
class DefaultCfg:
tags: Deque[str] = field(default_factory=deque) # priority queue of tags (first is default)
cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict) # pretrained cfgs by tag
is_pretrained: bool = False # at least one of the configs has a pretrained source set
@property
def default(self):
return self.cfgs[self.tags[0]]
@property
def default_with_tag(self):
tag = self.tags[0]
return tag, self.cfgs[tag]
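# Hedged usage sketch (editor's addition, not part of the original file): the
# URL below is a placeholder, not a real weight location.
def _demo_pretrained_cfg():
    cfg = PretrainedCfg(url='https://example.com/weights.pth')
    assert bool(cfg.has_weights)
    d = cfg.to_dict(remove_source=True)
    assert 'url' not in d and d['interpolation'] == 'bicubic'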
|
pytorch-image-models/timm/models/_pretrained.py/0
|
{
"file_path": "pytorch-image-models/timm/models/_pretrained.py",
"repo_id": "pytorch-image-models",
"token_count": 1341
}
| 212
|
""" CrossViT Model
@inproceedings{
chen2021crossvit,
title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
booktitle={International Conference on Computer Vision (ICCV)},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.14899
Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
NOTE: model names have been renamed from the originals to reflect the actual input resolution: all *_224 -> *_240 and *_384 -> *_408
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Modified from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.hub
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['CrossVit'] # model_registry will add each entrypoint fn to this
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if multi_conv:
if patch_size[0] == 12:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1),
)
elif patch_size[0] == 16:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1),
)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
_assert(H == self.img_size[0],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
_assert(W == self.img_size[1],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class CrossAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = head_dim ** -0.5
self.wq = nn.Linear(dim, dim, bias=qkv_bias)
self.wk = nn.Linear(dim, dim, bias=qkv_bias)
self.wv = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# B1C -> B1H(C/H) -> BH1(C/H)
q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C
x = self.proj(x)
x = self.proj_drop(x)
return x
class CrossAttentionBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = CrossAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x)))
return x
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim,
patches,
depth,
num_heads,
mlp_ratio,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
num_branches = len(dim)
self.num_branches = num_branches
# different branch could have different embedding size, the first one is the base
self.blocks = nn.ModuleList()
for d in range(num_branches):
tmp = []
for i in range(depth[d]):
tmp.append(Block(
dim=dim[d],
num_heads=num_heads[d],
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i],
norm_layer=norm_layer,
))
if len(tmp) != 0:
self.blocks.append(nn.Sequential(*tmp))
if len(self.blocks) == 0:
self.blocks = None
self.projs = nn.ModuleList()
for d in range(num_branches):
            if dim[d] == dim[(d + 1) % num_branches] and False:  # NOTE: `and False` disables the identity path (kept as-is from the original impl)
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
self.projs.append(nn.Sequential(*tmp))
self.fusion = nn.ModuleList()
for d in range(num_branches):
d_ = (d + 1) % num_branches
nh = num_heads[d_]
            if depth[-1] == 0:  # backward compatibility
self.fusion.append(
CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
else:
tmp = []
for _ in range(depth[-1]):
tmp.append(CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
self.fusion.append(nn.Sequential(*tmp))
self.revert_projs = nn.ModuleList()
for d in range(num_branches):
            if dim[(d + 1) % num_branches] == dim[d] and False:  # NOTE: `and False` disables the identity path (kept as-is from the original impl)
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
nn.Linear(dim[(d + 1) % num_branches], dim[d])]
self.revert_projs.append(nn.Sequential(*tmp))
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
outs_b = []
for i, block in enumerate(self.blocks):
outs_b.append(block(x[i]))
# only take the cls token out
proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
for i, proj in enumerate(self.projs):
proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
# cross attention
outs = []
for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
tmp = fusion(tmp)
reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
outs.append(tmp)
return outs
def _compute_num_patches(img_size, patches):
return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
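# Hedged worked example (editor's addition): for the dual-branch 240px models,
# img_size [(240, 240), (240, 240)] with patches [12, 16] gives
# [20 * 20, 15 * 15] == [400, 225] patches per branch.
def _demo_compute_num_patches():
    assert _compute_num_patches([(240, 240), (240, 240)], [12, 16]) == [400, 225]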
@register_notrace_function
def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript
"""
Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing.
Args:
x (Tensor): input image
ss (tuple[int, int]): height and width to scale to
crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False
Returns:
Tensor: the "scaled" image batch tensor
"""
H, W = x.shape[-2:]
if H != ss[0] or W != ss[1]:
if crop_scale and ss[0] <= H and ss[1] <= W:
cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.))
x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]]
else:
x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False)
return x
class CrossVit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
img_scale=(1.0, 1.0),
patch_size=(8, 16),
in_chans=3,
num_classes=1000,
embed_dim=(192, 384),
depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)),
num_heads=(6, 12),
mlp_ratio=(2., 2., 4.),
multi_conv=False,
crop_scale=False,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.img_size = to_2tuple(img_size)
img_scale = to_2tuple(img_scale)
self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale]
self.crop_scale = crop_scale # crop instead of interpolate for scale
num_patches = _compute_num_patches(self.img_size_scaled, patch_size)
self.num_branches = len(patch_size)
self.embed_dim = embed_dim
self.num_features = self.head_hidden_size = sum(embed_dim)
self.patch_embed = nn.ModuleList()
# hard-coded for torch jit script
for i in range(self.num_branches):
setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i])))
setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i])))
for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim):
self.patch_embed.append(
PatchEmbed(
img_size=im_s,
patch_size=p,
in_chans=in_chans,
embed_dim=d,
multi_conv=multi_conv,
))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
total_depth = sum([sum(x[-2:]) for x in depth])
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule
dpr_ptr = 0
self.blocks = nn.ModuleList()
for idx, block_cfg in enumerate(depth):
curr_depth = max(block_cfg[:-1]) + block_cfg[-1]
dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth]
blk = MultiScaleBlock(
embed_dim,
num_patches,
block_cfg,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr_,
norm_layer=norm_layer,
)
dpr_ptr += curr_depth
self.blocks.append(blk)
self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)])
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.ModuleList([
nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
for i in range(self.num_branches)])
for i in range(self.num_branches):
trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02)
trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
out = set()
for i in range(self.num_branches):
out.add(f'cls_token_{i}')
pe = getattr(self, f'pos_embed_{i}', None)
if pe is not None and pe.requires_grad:
out.add(f'pos_embed_{i}')
return out
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.ModuleList([
nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
for i in range(self.num_branches)
])
def forward_features(self, x) -> List[torch.Tensor]:
B = x.shape[0]
xs = []
for i, patch_embed in enumerate(self.patch_embed):
x_ = x
ss = self.img_size_scaled[i]
x_ = scale_image(x_, ss, self.crop_scale)
x_ = patch_embed(x_)
cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script
cls_tokens = cls_tokens.expand(B, -1, -1)
x_ = torch.cat((cls_tokens, x_), dim=1)
pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script
x_ = x_ + pos_embed
x_ = self.pos_drop(x_)
xs.append(x_)
for i, blk in enumerate(self.blocks):
xs = blk(xs)
        # NOTE: moved here from before the branch-token section so that all branch tokens pass through the layer norm
xs = [norm(xs[i]) for i, norm in enumerate(self.norm)]
return xs
def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor:
xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs]
xs = [self.head_drop(x) for x in xs]
if pre_logits or isinstance(self.head[0], nn.Identity):
return torch.cat([x for x in xs], dim=1)
return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0)
def forward(self, x):
xs = self.forward_features(x)
x = self.forward_head(xs)
return x
def _create_crossvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
def pretrained_filter_fn(state_dict):
new_state_dict = {}
for key in state_dict.keys():
if 'pos_embed' in key or 'cls_token' in key:
new_key = key.replace(".", "_")
else:
new_key = key
new_state_dict[new_key] = state_dict[key]
return new_state_dict
return build_model_with_cfg(
CrossVit,
variant,
pretrained,
pretrained_filter_fn=pretrained_filter_fn,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'),
'classifier': ('head.0', 'head.1'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_15_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_15_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_18_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_18_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_9_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[3, 3], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[6, 6], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[12, 12], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
        num_heads=[7, 7], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
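# Hedged usage sketch (editor's addition, not part of the original file):
# building a variant with pretrained=False needs no downloads; the default cfg
# is marked fixed_input_size, so the builder fixes img_size at 240.
def _demo_crossvit():
    model = crossvit_tiny_240(pretrained=False)
    logits = model(torch.randn(1, 3, 240, 240))
    assert logits.shape == (1, 1000)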
|
pytorch-image-models/timm/models/crossvit.py/0
|
{
"file_path": "pytorch-image-models/timm/models/crossvit.py",
"repo_id": "pytorch-image-models",
"token_count": 12490
}
| 213
|
# NOTE timm.models.layers is DEPRECATED, please use timm.layers, this is here to reduce breakages in transition
from timm.layers.activations import *
from timm.layers.adaptive_avgmax_pool import \
adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding
from timm.layers.blur_pool import BlurPool2d
from timm.layers.classifier import ClassifierHead, create_classifier
from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer
from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
set_layer_config
from timm.layers.conv2d_same import Conv2dSame, conv2d_same
from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct
from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn
from timm.layers.create_attn import get_attn, create_attn
from timm.layers.create_conv2d import create_conv2d
from timm.layers.create_norm import get_norm_layer, create_norm_layer
from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer
from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\
EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a
from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm
from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d
from timm.layers.gather_excite import GatherExcite
from timm.layers.global_context import GlobalContext
from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple
from timm.layers.inplace_abn import InplaceAbn
from timm.layers.linear import Linear
from timm.layers.mixed_conv2d import MixedConv2d
from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp
from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn
from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d
from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm
from timm.layers.padding import get_padding, get_same_padding, pad_same
from timm.layers.patch_embed import PatchEmbed
from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d
from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from timm.layers.selective_kernel import SelectiveKernel
from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct
from timm.layers.split_attn import SplitAttn
from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool
from timm.layers.trace_utils import _assert, _float_to_int
from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_
import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", DeprecationWarning)
|
pytorch-image-models/timm/models/layers/__init__.py/0
|
{
"file_path": "pytorch-image-models/timm/models/layers/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 1222
}
| 214
|
""" TinyViT
Paper: `TinyViT: Fast Pretraining Distillation for Small Vision Transformers`
- https://arxiv.org/abs/2207.10666
Adapted from official impl at https://github.com/microsoft/Cream/tree/main/TinyViT
"""
__all__ = ['TinyVit']
import itertools
from functools import partial
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath,\
trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(torch.nn.Sequential):
def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self.conv, self.bn
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Conv2d(
w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchEmbed(nn.Module):
def __init__(self, in_chs, out_chs, act_layer):
super().__init__()
self.stride = 4
self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1)
self.act = act_layer()
self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1)
def forward(self, x):
x = self.conv1(x)
x = self.act(x)
x = self.conv2(x)
return x
class MBConv(nn.Module):
def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path):
super().__init__()
mid_chs = int(in_chs * expand_ratio)
self.conv1 = ConvNorm(in_chs, mid_chs, ks=1)
self.act1 = act_layer()
self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs)
self.act2 = act_layer()
self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0)
self.act3 = act_layer()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop_path(x)
x += shortcut
x = self.act3(x)
return x
class PatchMerging(nn.Module):
def __init__(self, dim, out_dim, act_layer):
super().__init__()
self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0)
self.act1 = act_layer()
self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim)
self.act2 = act_layer()
self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0)
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
return x
class ConvLayer(nn.Module):
def __init__(
self,
dim,
depth,
act_layer,
drop_path=0.,
conv_expand_ratio=4.,
):
super().__init__()
self.dim = dim
self.depth = depth
self.blocks = nn.Sequential(*[
MBConv(
dim, dim, conv_expand_ratio, act_layer,
drop_path[i] if isinstance(drop_path, list) else drop_path,
)
for i in range(depth)
])
def forward(self, x):
x = self.blocks(x)
return x
class NormMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
drop=0.,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.norm = norm_layer(in_features)
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop)
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop)
def forward(self, x):
x = self.norm(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Attention(torch.nn.Module):
fused_attn: torch.jit.Final[bool]
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=(14, 14),
):
super().__init__()
assert isinstance(resolution, tuple) and len(resolution) == 2
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.val_dim = int(attn_ratio * key_dim)
self.out_dim = self.val_dim * num_heads
self.attn_ratio = attn_ratio
self.resolution = resolution
self.fused_attn = use_fused_attn()
self.norm = nn.LayerNorm(dim)
self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim))
self.proj = nn.Linear(self.out_dim, dim)
points = list(itertools.product(range(resolution[0]), range(resolution[1])))
N = len(points)
attention_offsets = {}
idxs = []
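        # Enumerate all (query, key) position pairs in the window and bucket them
        # by absolute coordinate offset; pairs sharing an offset share one learned
        # bias slot, so attention_biases is (num_heads, num_unique_offsets)
        # rather than (num_heads, N, N).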
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
attn_bias = self.get_attention_biases(x.device)
B, N, _ = x.shape
# Normalization
x = self.norm(x)
qkv = self.qkv(x)
# (B, N, num_heads, d)
q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3)
# (B, num_heads, N, d)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn + attn_bias
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, self.out_dim)
x = self.proj(x)
return x
class TinyVitBlock(nn.Module):
""" TinyViT Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
local_conv_size (int): the kernel size of the convolution between
Attention and MLP. Default: 3
act_layer: the activation function. Default: nn.GELU
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
local_conv_size=3,
act_layer=nn.GELU
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
assert window_size > 0, 'window_size must be greater than 0'
self.window_size = window_size
self.mlp_ratio = mlp_ratio
assert dim % num_heads == 0, 'dim must be divisible by num_heads'
head_dim = dim // num_heads
window_resolution = (window_size, window_size)
self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = NormMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
pad = local_conv_size // 2
self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
def forward(self, x):
B, H, W, C = x.shape
L = H * W
shortcut = x
if H == self.window_size and W == self.window_size:
x = x.reshape(B, L, C)
x = self.attn(x)
x = x.view(B, H, W, C)
else:
pad_b = (self.window_size - H % self.window_size) % self.window_size
pad_r = (self.window_size - W % self.window_size) % self.window_size
padding = pad_b > 0 or pad_r > 0
if padding:
x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
# window partition
pH, pW = H + pad_b, W + pad_r
nH = pH // self.window_size
nW = pW // self.window_size
x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
B * nH * nW, self.window_size * self.window_size, C
)
x = self.attn(x)
# window reverse
x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
if padding:
x = x[:, :H, :W].contiguous()
x = shortcut + self.drop_path1(x)
x = x.permute(0, 3, 1, 2)
x = self.local_conv(x)
x = x.reshape(B, C, L).transpose(1, 2)
x = x + self.drop_path2(self.mlp(x))
return x.view(B, H, W, C)
def extra_repr(self) -> str:
return f"dim={self.dim}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
register_notrace_module(TinyVitBlock)
class TinyVitStage(nn.Module):
""" A basic TinyViT layer for one stage.
Args:
dim (int): Number of input channels.
out_dim: the output dimension of the layer
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
act_layer: the activation function. Default: nn.GELU
"""
def __init__(
self,
dim,
out_dim,
depth,
num_heads,
window_size,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
downsample=None,
local_conv_size=3,
act_layer=nn.GELU,
):
super().__init__()
self.depth = depth
self.out_dim = out_dim
# patch merging layer
if downsample is not None:
self.downsample = downsample(
dim=dim,
out_dim=out_dim,
act_layer=act_layer,
)
else:
self.downsample = nn.Identity()
assert dim == out_dim
# build blocks
self.blocks = nn.Sequential(*[
TinyVitBlock(
dim=out_dim,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
local_conv_size=local_conv_size,
act_layer=act_layer,
)
for i in range(depth)])
def forward(self, x):
x = self.downsample(x)
x = x.permute(0, 2, 3, 1) # BCHW -> BHWC
x = self.blocks(x)
x = x.permute(0, 3, 1, 2) # BHWC -> BCHW
return x
def extra_repr(self) -> str:
return f"dim={self.out_dim}, depth={self.depth}"
class TinyVit(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dims=(96, 192, 384, 768),
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
window_sizes=(7, 7, 14, 7),
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
act_layer=nn.GELU,
):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.num_stages = len(depths)
self.mlp_ratio = mlp_ratio
self.grad_checkpointing = use_checkpoint
self.patch_embed = PatchEmbed(
in_chs=in_chans,
out_chs=embed_dims[0],
act_layer=act_layer,
)
# stochastic depth rate rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# build stages
self.stages = nn.Sequential()
stride = self.patch_embed.stride
prev_dim = embed_dims[0]
self.feature_info = []
for stage_idx in range(self.num_stages):
if stage_idx == 0:
stage = ConvLayer(
dim=prev_dim,
depth=depths[stage_idx],
act_layer=act_layer,
drop_path=dpr[:depths[stage_idx]],
conv_expand_ratio=mbconv_expand_ratio,
)
else:
out_dim = embed_dims[stage_idx]
drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])]
stage = TinyVitStage(
dim=embed_dims[stage_idx - 1],
out_dim=out_dim,
depth=depths[stage_idx],
num_heads=num_heads[stage_idx],
window_size=window_sizes[stage_idx],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
local_conv_size=local_conv_size,
drop_path=drop_path_rate,
downsample=PatchMerging,
act_layer=act_layer,
)
prev_dim = out_dim
stride *= 2
self.stages.append(stage)
self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')]
# Classifier head
self.num_features = self.head_hidden_size = embed_dims[-1]
norm_layer_cf = partial(LayerNorm2d, eps=1e-5)
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
norm_layer=norm_layer_cf,
)
# init weights
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'attention_biases'}
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.patch_embed(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict.keys():
state_dict = state_dict['model']
target_sd = model.state_dict()
out_dict = {}
for k, v in state_dict.items():
if k.endswith('attention_bias_idxs'):
continue
if 'attention_biases' in k:
# TODO: whether move this func into model for dynamic input resolution? (high risk)
v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv1.conv',
'classifier': 'head.fc',
'pool_size': (7, 7),
'input_size': (3, 224, 224),
'crop_pct': 0.95,
**kwargs,
}
default_cfgs = generate_default_cfgs({
'tiny_vit_5m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth'
),
'tiny_vit_5m_224.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth'
),
'tiny_vit_11m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth'
),
'tiny_vit_11m_224.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth'
),
'tiny_vit_21m_224.dist_in22k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22k_distill.pth',
num_classes=21841
),
'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth'
),
'tiny_vit_21m_224.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth'
),
'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash',
),
})
def _create_tiny_vit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
TinyVit,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs
)
return model
@register_model
def tiny_vit_5m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[64, 128, 160, 320],
depths=[2, 2, 6, 2],
num_heads=[2, 4, 5, 10],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.0,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_11m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[64, 128, 256, 448],
depths=[2, 2, 6, 2],
num_heads=[2, 4, 8, 14],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_224(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[7, 7, 14, 7],
drop_path_rate=0.2,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_384(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[12, 12, 24, 12],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs)
@register_model
def tiny_vit_21m_512(pretrained=False, **kwargs):
model_kwargs = dict(
embed_dims=[96, 192, 384, 576],
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 18],
window_sizes=[16, 16, 32, 16],
drop_path_rate=0.1,
)
model_kwargs.update(kwargs)
return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs)
|
pytorch-image-models/timm/models/tiny_vit.py/0
|
{
"file_path": "pytorch-image-models/timm/models/tiny_vit.py",
"repo_id": "pytorch-image-models",
"token_count": 12466
}
| 215
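A minimal usage sketch for the model file above (assumes an installed timm with these architectures registered; the names match the @register_model entries):

import torch
import timm

# tiny_vit_5m_224: embed_dims (64, 128, 160, 320), final feature stride 32
model = timm.create_model('tiny_vit_5m_224', pretrained=False)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)                     # (1, 1000)
    features = model.forward_features(x)  # (1, 320, 7, 7)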
|
from .adabelief import AdaBelief
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .adamw import AdamW
from .adan import Adan
from .lamb import Lamb
from .lars import Lars
from .lookahead import Lookahead
from .madgrad import MADGRAD
from .nadam import Nadam
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from .lion import Lion
from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
|
pytorch-image-models/timm/optim/__init__.py/0
|
{
"file_path": "pytorch-image-models/timm/optim/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 170
}
| 216
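A short consumption sketch for the exports above, assuming the factory accepts an nn.Module as in recent timm releases:

import torch.nn as nn
from timm.optim import create_optimizer_v2

model = nn.Linear(10, 2)
# 'opt' selects among the re-exported optimizers, e.g. 'adamw', 'lamb', 'lion'
optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)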
|
"""RAdam Optimizer.
Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam
Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
buffer=[[None, None, None] for _ in range(10)])
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_fp32 = p.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
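                # num_sma and step_size depend only on the step count, so they are
                # cached in a 10-slot ring buffer shared across params in the group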
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
num_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
num_sma_max = 2 / (1 - beta2) - 1
num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = num_sma
# more conservative since it's an approximated value
if num_sma >= 5:
step_size = group['lr'] * math.sqrt(
(1 - beta2_t) *
(num_sma - 4) / (num_sma_max - 4) *
(num_sma - 2) / num_sma *
num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])
# more conservative since it's an approximated value
if num_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
p_fp32.add_(exp_avg, alpha=-step_size)
p.copy_(p_fp32)
return loss
|
pytorch-image-models/timm/optim/radam.py/0
|
{
"file_path": "pytorch-image-models/timm/optim/radam.py",
"repo_id": "pytorch-image-models",
"token_count": 1967
}
| 217
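The buffered branch above computes the variance-rectification term from the RAdam paper. Restated as a sketch in the paper's notation, with t = state['step'], \rho_t = num_sma and \rho_\infty = num_sma_max:

\rho_\infty = \frac{2}{1-\beta_2} - 1, \qquad
\rho_t = \rho_\infty - \frac{2\,t\,\beta_2^{\,t}}{1-\beta_2^{\,t}}

r_t = \sqrt{\frac{(\rho_t - 4)(\rho_t - 2)\,\rho_\infty}{(\rho_\infty - 4)(\rho_\infty - 2)\,\rho_t}}

\text{step size} =
\begin{cases}
\mathrm{lr}\cdot\dfrac{\sqrt{1-\beta_2^{\,t}}}{1-\beta_1^{\,t}}\,r_t & \rho_t \ge 5 \text{ (rectified; adaptive denominator used)} \\[2mm]
\mathrm{lr}\cdot\dfrac{1}{1-\beta_1^{\,t}} & \text{otherwise (plain momentum update)}
\end{cases}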
|
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 Ross Wightman
"""
import glob
import operator
import os
import logging
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
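        # update 'last' near-atomically: write to a tmp file then rename over it;
        # the named/best checkpoints below are hard links to 'last', so each
        # checkpoint's data is only stored once on disk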
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
if os.path.exists(last_save_path):
os.unlink(last_save_path) # required for Windows support.
os.rename(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric, worst_file[1])):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
os.link(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[1],
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
if os.path.exists(best_save_path):
os.unlink(best_save_path)
os.link(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
|
pytorch-image-models/timm/utils/checkpoint_saver.py/0
|
{
"file_path": "pytorch-image-models/timm/utils/checkpoint_saver.py",
"repo_id": "pytorch-image-models",
"token_count": 2818
}
| 218
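A minimal usage sketch for the saver above (model, optimizer and directory are placeholders; the checkpoint directory must exist before saving):

import os
import torch
from timm.utils import CheckpointSaver

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

os.makedirs('./output', exist_ok=True)
# keep the 3 best checkpoints; a higher metric is better since decreasing=False
saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output', max_history=3)
for epoch in range(5):
    val_metric = 0.5 + 0.1 * epoch  # stand-in for a real validation metric
    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=val_metric)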
|
#!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import csv
import glob
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.layers import apply_test_time_pool, set_fast_norm
from timm.models import create_model, load_checkpoint, is_model, list_models
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser, \
decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in dataset split, for IterableDatasets.')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--input-key', default=None, type=str,
help='Dataset key for input images.')
parser.add_argument('--input-img-mode', default=None, type=str,
help='Dataset image conversion mode for input images.')
parser.add_argument('--target-key', default=None, type=str,
help='Dataset key for target labels.')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--crop-border-pixels', type=int, default=None,
help='Crop pixels from image border.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
                    help='Number of classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUs to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--amp-impl', default='native', type=str,
help='AMP impl to use, "native" or "apex" (default: native)')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
                    help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
parser.add_argument('--retry', default=False, action='store_true',
help='Enable batch size decay & retry for single model validation')
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
amp_autocast = suppress
if args.amp:
if args.amp_impl == 'apex':
assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
assert args.amp_dtype == 'float16'
use_amp = 'apex'
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
assert args.amp_dtype in ('float16', 'bfloat16')
use_amp = 'native'
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Validating in mixed precision with native PyTorch AMP.')
else:
_logger.info('Validating in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
if args.fast_norm:
set_fast_norm()
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=in_chans,
global_pool=args.gp,
scriptable=args.torchscript,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
if args.reparam:
model = reparameterize_model(model)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(
vars(args),
model=model,
use_test_size=not args.use_train_size,
verbose=True,
)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device)
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
model = torch.jit.script(model)
elif args.torchcompile:
        assert has_compile, 'A version of torch w/ torch.compile() is required for --torchcompile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if use_amp == 'apex':
model = amp.initialize(model, opt_level='O1')
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().to(device)
root_dir = args.data or args.data_dir
if args.input_img_mode is None:
input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
else:
input_img_mode = args.input_img_mode
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
download=args.dataset_download,
load_bytes=args.tf_preprocessing,
class_map=args.class_map,
num_samples=args.num_samples,
input_key=args.input_key,
input_img_mode=input_img_mode,
target_key=args.target_key,
)
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = [int(line.rstrip()) for line in f]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
crop_mode=data_config['crop_mode'],
crop_border_pixels=args.crop_border_pixels,
pin_memory=args.pin_mem,
device=device,
tf_preprocessing=args.tf_preprocessing,
)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.to(device)
input = input.to(device)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx,
len(loader),
batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses,
top1=top1,
top5=top5
)
)
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
model=args.model,
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
crop_pct=crop_pct,
interpolation=data_config['interpolation'],
)
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
return results
def _try_run(args, initial_batch_size):
batch_size = initial_batch_size
results = OrderedDict()
error_str = 'Unknown'
while batch_size:
args.batch_size = batch_size * args.num_gpu # multiply by num-gpu for DataParallel case
try:
if torch.cuda.is_available() and 'cuda' in args.device:
torch.cuda.empty_cache()
results = validate(args)
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running validation.')
if not check_batch_size_retry(error_str):
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['error'] = error_str
_logger.error(f'{args.model} failed to validate ({error_str}).')
return results
_NON_IN1K_FILTERS = ['*_in21k', '*_in22k', '*in12k', '*_dino', '*fcmae', '*seer']
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(
pretrained=True,
exclude_filters=_NON_IN1K_FILTERS,
)
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(
args.model,
pretrained=True,
)
model_cfgs = [(n, '') for n in model_names]
if not model_cfgs and os.path.isfile(args.model):
with open(args.model) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names if n]
if len(model_cfgs):
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
initial_batch_size = args.batch_size
for m, c in model_cfgs:
args.model = m
args.checkpoint = c
r = _try_run(args, initial_batch_size)
if 'error' in r:
continue
if args.checkpoint:
r['checkpoint'] = args.checkpoint
results.append(r)
    except KeyboardInterrupt:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
else:
if args.retry:
results = _try_run(args, args.batch_size)
else:
results = validate(args)
if args.results_file:
write_results(args.results_file, results, format=args.results_format)
# output results in JSON to stdout w/ delimiter for runner script
print(f'--result\n{json.dumps(results, indent=4)}')
def write_results(results_file, results, format='csv'):
with open(results_file, mode='w') as cf:
if format == 'json':
json.dump(results, cf, indent=4)
else:
if not isinstance(results, (list, tuple)):
results = [results]
if not results:
return
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
|
pytorch-image-models/validate.py/0
|
{
"file_path": "pytorch-image-models/validate.py",
"repo_id": "pytorch-image-models",
"token_count": 9310
}
| 219
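A programmatic usage sketch for the script above; it assumes execution from the repo root, an ImageNet-layout dataset at the placeholder path, and a CUDA device (the default --device):

from validate import parser, validate

# equivalent to: python validate.py /path/to/imagenet --model resnet50 --pretrained
args = parser.parse_args(['/path/to/imagenet', '--model', 'resnet50', '--pretrained'])
results = validate(args)
print(results['top1'], results['top5'])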
|
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
exclude: docs/source/reference/launcher.md
- repo: https://github.com/psf/black
rev: 24.2.0
hooks:
- id: black
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: cargo-check
- id: fmt
- id: clippy
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.0
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
|
text-generation-inference/.pre-commit-config.yaml/0
|
{
"file_path": "text-generation-inference/.pre-commit-config.yaml",
"repo_id": "text-generation-inference",
"token_count": 298
}
| 220
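Typical usage of the config above, via the standard pre-commit commands: install the tool (pip install pre-commit), run pre-commit install once to register the git hook, then pre-commit run --all-files to execute check-yaml, black, ruff and the cargo hooks across the repo (the Rust hooks require a local cargo toolchain).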
|
{
"__inputs": [
{
"name": "DS_PROMETHEUS_EKS API INFERENCE PROD",
"label": "Prometheus EKS API Inference Prod",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__elements": {},
"__requires": [
{
"type": "panel",
"id": "gauge",
"name": "Gauge",
"version": ""
},
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "10.0.2"
},
{
"type": "panel",
"id": "heatmap",
"name": "Heatmap",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 2,
"id": 551,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"fieldMinMax": false,
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 1000
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 0
},
"id": 49,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "(histogram_quantile(0.5, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m]))) * 1000) > 0",
"hide": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "(histogram_quantile(0.5, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m]))) * 1000) > 0",
"hide": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "C"
},
{
"datasource": {
"name": "Expression",
"type": "__expr__",
"uid": "__expr__"
},
"expression": "$B + $C",
"hide": false,
"refId": "D",
"type": "math"
}
],
"title": "Time to first token",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "ms"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 9,
"y": 0
},
"id": 44,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "(histogram_quantile(0.5, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m]))) * 1000)>0",
"instant": false,
"range": true,
"refId": "A"
}
],
"title": "Decode per-token latency",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 7,
"x": 17,
"y": 0
},
"id": 45,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"fields": "",
"values": false
},
"showPercentChange": false,
"textMode": "auto",
"wideLayout": true
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum((rate(tgi_request_generated_tokens_sum{container=\"$service\"}[10m]) / rate(tgi_request_generated_tokens_count{container=\"$service\"}[10m]))>0)",
"instant": false,
"range": true,
"refId": "A"
}
],
"title": "Throughput (generated tok/s)",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 7
},
"id": 48,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Number of tokens per prompt",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 7
},
"id": 30,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_generated_tokens_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_generated_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_generated_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Number of generated tokens per request",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 15
},
"id": 20,
"panels": [],
"title": "General",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 16
},
"id": 4,
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(increase(tgi_request_success{container=\"$service\"}[1m]))",
"legendFormat": "Success",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(increase(tgi_request_failure{container=\"$service\"}[1m])) by (err)",
"hide": false,
"legendFormat": "Error: {{err}}",
"range": true,
"refId": "B"
}
],
"title": "Requests",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 13,
"w": 9,
"x": 6,
"y": 16
},
"id": 6,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Mean Time Per Token quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 13,
"w": 9,
"x": 15,
"y": 16
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 13,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_request_mean_time_per_token_duration_bucket{container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Mean Time Per Token",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "orange",
"value": 70
},
{
"color": "red",
"value": 85
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 3,
"x": 0,
"y": 24
},
"id": 18,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "count(tgi_request_count{container=\"$service\"})",
"legendFormat": "Replicas",
"range": true,
"refId": "A"
}
],
"title": "Number of replicas",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "orange",
"value": 70
},
{
"color": "red",
"value": 85
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 3,
"x": 3,
"y": 24
},
"id": 32,
"options": {
"minVizHeight": 75,
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sizing": "auto"
},
"pluginVersion": "10.4.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(tgi_queue_size{container=\"$service\"})",
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Queue Size",
"type": "gauge"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 29
},
"id": 26,
"panels": [],
"title": "Batching",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 0,
"y": 30
},
"id": 29,
"maxDataPoints": 40,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "avg(tgi_batch_current_max_tokens{container=\"$service\"})",
"legendFormat": "{{ pod }}",
"range": true,
"refId": "A"
}
],
"title": "Max tokens per batch",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 4,
"x": 6,
"y": 30
},
"id": 33,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_skipped_tokens_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_skipped_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_skipped_tokens_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Speculated Tokens",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "none"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 5,
"x": 10,
"y": 30
},
"id": 46,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_input_length_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Prompt Tokens",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 9,
"x": 15,
"y": 30
},
"id": 8,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_duration_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Latency quantiles",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 50,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 35
},
"id": 27,
"maxDataPoints": 40,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "9.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "avg(tgi_batch_current_size{container=\"$service\"})",
"legendFormat": "{{ pod }}",
"range": true,
"refId": "A"
}
],
"title": "Batch Size",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 6,
"x": 0,
"y": 39
},
"id": 28,
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "sum(increase(tgi_batch_concat{container=\"$service\"}[1m])) by (reason)",
"hide": false,
"legendFormat": "Reason: {{ reason }}",
"range": true,
"refId": "B"
}
],
"title": "Concatenates",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 9,
"x": 6,
"y": 39
},
"id": 31,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_request_queue_duration_bucket{container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Queue quantiles",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 48
},
"id": 22,
"panels": [],
"title": "Prefill",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 49
},
"id": 7,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Prefill Quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 49
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 14,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_inference_duration_bucket{method=\"prefill\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Prefill Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 60
},
"id": 24,
"panels": [],
"title": "Decode",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 61
},
"id": 11,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Decode quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 61
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 15,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_inference_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Decode Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 72
},
"id": 43,
"panels": [],
"title": "Debug",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 0,
"y": 73
},
"id": 38,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Forward quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 6,
"y": 73
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 35,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_forward_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Forward Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 12,
"y": 73
},
"id": 34,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Token Decode quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 18,
"y": 73
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 40,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_decode_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Token Decode Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 0,
"y": 84
},
"id": 42,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Filter Batch quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 6,
"y": 84
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 39,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_filter_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Filter Batch Latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "p50"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p90"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "p99"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 6,
"x": 12,
"y": 84
},
"id": 36,
"options": {
"legend": {
"calcs": [
"min",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.5, sum by (le) (rate(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"legendFormat": "p50",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.9, sum by (le) (rate(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p90",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"expr": "histogram_quantile(0.99, sum by (le) (rate(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[10m])))",
"hide": false,
"legendFormat": "p99",
"range": true,
"refId": "C"
}
],
"title": "Batch Concat quantiles",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#5794F2",
"colorScale": "linear",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 6,
"x": 18,
"y": 84
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 41,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#5794F2",
"min": 0,
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Spectral",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"mode": "single",
"showColorScale": false,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"decimals": 1,
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "10.4.2",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"editorMode": "code",
"exemplar": true,
"expr": "sum(increase(tgi_batch_concat_duration_bucket{method=\"decode\", container=\"$service\"}[5m])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{Β le }}",
"range": true,
"refId": "A"
}
],
"title": "Batch Concat latency",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"decimals": 1,
"format": "s",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
}
],
"refresh": "",
"schemaVersion": 39,
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "gpu-txt-gen-cohereforai-c4ai-command-r-plu-ba7f1",
"value": "gpu-txt-gen-cohereforai-c4ai-command-r-plu-ba7f1"
},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS_EKS API INFERENCE PROD}"
},
"definition": "label_values(tgi_request_count, container)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "service",
"options": [],
"query": {
"query": "label_values(tgi_request_count, container)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": {
"from": "now-30m",
"to": "now-30s"
},
"timepicker": {
"nowDelay": "30s"
},
"timezone": "",
"title": "Text Generation Inference",
"uid": "RHSk7EL4kdqsd",
"version": 12,
"weekStart": ""
}
|
text-generation-inference/assets/tgi_grafana.json/0
|
{
"file_path": "text-generation-inference/assets/tgi_grafana.json",
"repo_id": "text-generation-inference",
"token_count": 62818
}
| 221
|
use cxx_build::CFG;
use pkg_config;
use std::env;
use std::env::consts::ARCH;
use std::path::{absolute, PathBuf};
const ADDITIONAL_BACKEND_LINK_LIBRARIES: [&str; 2] = ["spdlog", "fmt"];
const CUDA_ARCH_LIST: Option<&str> = option_env!("CUDA_ARCH_LIST");
const CUDA_REQUIRED_VERSION: &str = "12.5";
const MPI_REQUIRED_VERSION: &str = "4.1";
const INSTALL_PREFIX: Option<&str> = option_env!("CMAKE_INSTALL_PREFIX");
const TENSORRT_ROOT_DIR: Option<&str> = option_env!("TENSORRT_ROOT_DIR");
const NCCL_ROOT_DIR: Option<&str> = option_env!("NCCL_ROOT_DIR");
// Dependencies
const BACKEND_DEPS: [&str; 2] = ["tgi_trtllm_backend_impl", "tgi_trtllm_backend"];
const CUDA_TRANSITIVE_DEPS: [&str; 4] = ["cuda", "cudart", "cublas", "nvidia-ml"];
const TENSORRT_LLM_TRANSITIVE_DEPS: [(&str, &str); 5] = [
("dylib", "tensorrt_llm"),
("static", "tensorrt_llm_executor_static"),
("dylib", "tensorrt_llm_nvrtc_wrapper"),
("dylib", "nvinfer_plugin_tensorrt_llm"),
("dylib", "decoder_attention"),
];
macro_rules! probe {
($name: expr, $version: expr) => {
if let Err(_) = pkg_config::probe_library($name) {
pkg_config::probe_library(&format!("{}-{}", $name, $version))
.expect(&format!("Failed to locate {}", $name));
}
};
}
fn build_backend(is_debug: bool, opt_level: &str, out_dir: &PathBuf) -> (PathBuf, PathBuf) {
// Build the backend implementation through CMake
let install_path = INSTALL_PREFIX.unwrap_or("/usr/local/tgi");
let tensorrt_path = TENSORRT_ROOT_DIR.unwrap_or("/usr/local/tensorrt");
let cuda_arch_list = CUDA_ARCH_LIST.unwrap_or("90-real"); // Hopper by default
let mut install_path = PathBuf::from(install_path);
if !install_path.is_absolute() {
install_path = absolute(out_dir).expect("cannot happen").join(install_path);
}
let _ = cmake::Config::new(".")
.uses_cxx11()
.generator("Ninja")
.profile(match is_debug {
true => "Debug",
false => "Release",
})
.env("OPT_LEVEL", opt_level)
.define("CMAKE_INSTALL_PREFIX", &install_path)
.define("CMAKE_CUDA_COMPILER", "/usr/local/cuda/bin/nvcc")
.define("TGI_TRTLLM_BACKEND_TARGET_CUDA_ARCH_LIST", cuda_arch_list)
.define("TGI_TRTLLM_BACKEND_TRT_ROOT", tensorrt_path)
.build();
// Additional transitive CMake dependencies
let deps_folder = out_dir.join("build").join("_deps");
for dependency in ADDITIONAL_BACKEND_LINK_LIBRARIES {
let dep_name = match is_debug {
true => format!("{}d", dependency),
false => String::from(dependency),
};
let dep_path = deps_folder.join(format!("{}-build", dependency));
println!("cargo:rustc-link-search={}", dep_path.display());
println!("cargo:rustc-link-lib=static={}", dep_name);
}
// Emit linkage information from the artifacts we just built
let install_lib_path = install_path.join("lib");
println!(
r"cargo:warning=Adding link search path: {}",
install_lib_path.display()
);
println!(r"cargo:rustc-link-search={}", install_lib_path.display());
(install_path, deps_folder)
}
fn build_ffi_layer(deps_folder: &PathBuf) {
CFG.include_prefix = "backends/trtllm";
cxx_build::bridge("src/lib.rs")
.static_flag(true)
.include(deps_folder.join("fmt-src").join("include"))
.include(deps_folder.join("spdlog-src").join("include"))
.include(deps_folder.join("json-src").join("include"))
.include(deps_folder.join("trtllm-src").join("cpp").join("include"))
.include("/usr/local/cuda/include")
.include("/usr/local/tensorrt/include")
.file("src/ffi.cpp")
.std("c++20")
.compile("tgi_trtllm_backend");
println!("cargo:rerun-if-changed=CMakeLists.txt");
println!("cargo:rerun-if-changed=include/backend.h");
println!("cargo:rerun-if-changed=lib/backend.cpp");
println!("cargo:rerun-if-changed=include/ffi.h");
println!("cargo:rerun-if-changed=src/ffi.cpp");
}
fn main() {
// Misc variables
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let build_profile = env::var("PROFILE").unwrap();
let (is_debug, opt_level) = match build_profile.as_ref() {
"debug" => (true, "0"),
_ => (false, "3"),
};
// Build the backend
let (_backend_path, deps_folder) = build_backend(is_debug, opt_level, &out_dir);
// Build the FFI layer calling the backend above
build_ffi_layer(&deps_folder);
// Probe MPI with pkg-config
probe!("ompi", MPI_REQUIRED_VERSION);
// Probe CUDA & co. with pkg-config
CUDA_TRANSITIVE_DEPS.iter().for_each(|name| {
probe!(name, CUDA_REQUIRED_VERSION);
});
// NCCL is slightly trickier because it might not have a pkgconfig installed
let nccl_library_path_default = format!("/usr/local/{}-linux-gnu", ARCH);
let nccl_library_path = NCCL_ROOT_DIR.unwrap_or(&nccl_library_path_default);
println!(r"cargo:rustc-link-search=native={}", nccl_library_path);
println!("cargo:rustc-link-lib=dylib=nccl");
// TensorRT
let tensorrt_library_path = TENSORRT_ROOT_DIR.unwrap_or("/usr/local/tensorrt/lib");
println!(r"cargo:rustc-link-search=native={}", tensorrt_library_path);
println!("cargo:rustc-link-lib=dylib=nvinfer");
// TensorRT-LLM
TENSORRT_LLM_TRANSITIVE_DEPS
.iter()
.for_each(|(link_type, name)| {
println!("cargo:rustc-link-lib={}={}", link_type, name);
});
// Backend
BACKEND_DEPS.iter().for_each(|name| {
println!("cargo:rustc-link-lib=static={}", name);
});
}
|
text-generation-inference/backends/trtllm/build.rs/0
|
{
"file_path": "text-generation-inference/backends/trtllm/build.rs",
"repo_id": "text-generation-inference",
"token_count": 2548
}
| 222
|
//
// Created by mfuntowicz on 7/2/24.
//
#include <catch2/catch_all.hpp>
#include <spdlog/spdlog.h>
#include "../include/backend.h"
TEST_CASE("Load TRTLLM Engine on the TGI Backend", "[trtllm][engine][load]") {
const auto engines = std::filesystem::path("/home/mfuntowicz/.cache/huggingface/assets/trtllm/0.11.0.dev2024062500/meta-llama--Meta-Llama-3-8B-Instruct/4090/engines/");
const auto executor = std::filesystem::path("/home/mfuntowicz/Workspace/text-generation-inference/backends/trtllm/cmake-build-debug/cmake-build-debug/_deps/trtllm-src/cpp/tensorrt_llm/executor_worker/executorWorker");
spdlog::info("Loading config from: {}", absolute(engines).string());
huggingface::tgi::backends::TensorRtLlmBackend backend(engines, executor);
}
|
text-generation-inference/backends/trtllm/tests/infer_test.cpp/0
|
{
"file_path": "text-generation-inference/backends/trtllm/tests/infer_test.cpp",
"repo_id": "text-generation-inference",
"token_count": 306
}
| 223
|
/// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs
use crossterm::event;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, mpsc};
/// Events
#[derive(Debug)]
pub(crate) enum Event {
/// Terminal tick.
Tick,
/// Key press.
Key(event::KeyEvent),
/// Terminal resize.
Resize,
}
pub(crate) async fn terminal_event_task(
fps: u32,
event_sender: mpsc::Sender<Event>,
mut shutdown_receiver: broadcast::Receiver<()>,
_shutdown_guard_sender: mpsc::Sender<()>,
) {
// End task if a message is received on shutdown_receiver
// _shutdown_guard_sender will be dropped once the task is finished
tokio::select! {
_ = event_loop(fps, event_sender) => {
},
_ = shutdown_receiver.recv() => {}
}
}
/// Main event loop
async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) {
// Frame budget
let per_frame = Duration::from_secs(1) / fps;
// When was last frame executed
let mut last_frame = Instant::now();
loop {
// Sleep to avoid blocking the thread for too long
if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) {
tokio::time::sleep(sleep).await;
}
// Get crossterm event and send a new one over the channel
if event::poll(Duration::from_secs(0)).expect("failed to poll for events") {
match event::read().expect("unable to read event") {
event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()),
event::Event::Resize(_w, _h) => {
event_sender.send(Event::Resize).await.unwrap_or(())
}
_ => (),
}
}
// Frame budget exceeded
if last_frame.elapsed() >= per_frame {
// Send tick
event_sender.send(Event::Tick).await.unwrap_or(());
// Reset last_frame time
last_frame = Instant::now();
}
}
}
|
text-generation-inference/benchmark/src/event.rs/0
|
{
"file_path": "text-generation-inference/benchmark/src/event.rs",
"repo_id": "text-generation-inference",
"token_count": 913
}
| 224
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.7.0"
DEPRECATION_WARNING = (
"`text_generation` clients are deprecated and will be removed in the near future. "
"Please use the `InferenceClient` from the `huggingface_hub` package instead."
)
from text_generation.client import Client, AsyncClient # noqa E402
from text_generation.inference_api import ( # noqa E402
InferenceAPIClient,
InferenceAPIAsyncClient,
)
__all__ = [
"Client",
"AsyncClient",
"InferenceAPIClient",
"InferenceAPIAsyncClient",
]
|
text-generation-inference/clients/python/text_generation/__init__.py/0
|
{
"file_path": "text-generation-inference/clients/python/text_generation/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 338
}
| 225
|
# Train Medusa
This tutorial will show you how to train a Medusa model on a dataset of your choice. Please check out the [speculation documentation](../conceptual/speculation) for more information on how Medusa works and speculation in general.
## What are the benefits of training a Medusa model?
Training Medusa heads can greatly improve the speed of generation. Medusa adds extra "heads" to LLMs to predict multiple future tokens simultaneously. When augmenting a model with Medusa, the original model stays untouched, and only the new heads are fine-tuned during training.
One of the most important things is to have a good dataset (with data similar to what will be used in production), because Medusa has a much higher hit rate when the generation is in-domain.
If you train Medusa on a dataset that is very different from the one you will use in production, the model will not be able to predict future tokens accurately, and consequently the speedup will be minimal or non-existent.
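To make the head structure concrete, below is a minimal PyTorch sketch of a single Medusa head: a residual block feeding a language-model head, which is the general shape used by the original repository. The layer sizes and the `hidden_states` input are illustrative assumptions, not the exact implementation.

```python
import torch
import torch.nn as nn


class MedusaHead(nn.Module):
    """One extra head: a residual block followed by a language-model head."""

    def __init__(self, hidden_size: int, vocab_size: int):
        super().__init__()
        self.linear = nn.Linear(hidden_size, hidden_size)
        self.act = nn.SiLU()
        self.lm_head = nn.Linear(hidden_size, vocab_size, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Residual connection keeps the base model's representation intact
        hidden_states = hidden_states + self.act(self.linear(hidden_states))
        # Logits for the token k positions ahead (one head per lookahead offset)
        return self.lm_head(hidden_states)
```

A model trained with `medusa_num_heads 3` (as in the training command further down) carries three such heads, one per lookahead offset, all fed from the base model's final hidden states.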
## Self-distillation (Generating data for training)
There are many methods for preparing data for training, but one of the easiest and most effective ways is to "self-distill" the data. This means that you can use the same model to generate the data that you will use to train the model.
Essentially, you prompt the model with input similar to what you will use in production, and the model generates the output.
We'll use this output to help train the Medusa heads to predict the `n+1`, `n+2`, `n+3`, etc. tokens in the sequence.
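As a rough sketch of what this amounts to, the loop below sends production-like prompts to a running TGI instance and stores the prompt/completion pairs. The endpoint URL and the `prompts.json`/`self_distilled.json` file names are assumptions for illustration only; in this tutorial, the `create_data.py` script shown later does the real work.

```python
import json

import requests

TGI_URL = "http://localhost:8080/generate"  # assumed local TGI instance

# Production-like prompts, one string per entry (hypothetical input file)
with open("prompts.json") as f:
    prompts = json.load(f)

pairs = []
for prompt in prompts:
    response = requests.post(
        TGI_URL,
        json={"inputs": prompt, "parameters": {"max_new_tokens": 512}},
    )
    response.raise_for_status()
    # TGI's /generate endpoint returns {"generated_text": "..."}
    pairs.append({"prompt": prompt, "completion": response.json()["generated_text"]})

with open("self_distilled.json", "w") as f:
    json.dump(pairs, f, indent=2)
```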
## Training
The original implementation of Medusa is available at [https://github.com/FasterDecoding/Medusa](https://github.com/FasterDecoding/Medusa), and we'll follow a process very similar to the one described in the original repository.
### Getting Started
There are two methods for training the model:
- `torchrun`, a wrapper around `torch.distributed.launch`
- a forked version of `axolotl` that supports Medusa
In this tutorial we'll use `torchrun` as it is the most straightforward approach, but similar steps can be followed with `axolotl` if you prefer.
### Training with `torchrun`
```bash
mkdir medusa-training
cd medusa-training
pyenv install 3.10
pyenv local 3.10
uv venv -p 3.10
source .venv/bin/activate
```
Now let's clone the original `Medusa` repository and install the library.
```bash
git clone https://github.com/FasterDecoding/Medusa.git
cd Medusa
pip install -e .
```
Next we'll need some data to train on. We can use the `ShareGPT_Vicuna_unfiltered` dataset, which is available on the Hugging Face Hub.
```bash
apt install git-lfs
git lfs install
git clone https://huggingface.co/datasets/Aeala/ShareGPT_Vicuna_unfiltered
```
Currently our directory structure looks like this:
```bash
.
├── assets
├── CITATION.cff
├── create_data.py
├── data_generation
├── deepspeed.json
├── last_run_prepared
├── LICENSE
├── llm_judge
├── medusa
├── medusa_llm.egg-info
├── mistral.json
├── notebooks
├── pyproject.toml
├── README.md
├── ROADMAP.md
├── scripts
├── ShareGPT_Vicuna_unfiltered
│   ├── README.md
│   ├── ShareGPT_2023.05.04v0_Wasteland_Edition.json
│   └── ShareGPT_V4.3_unfiltered_cleaned_split.json
├── simple_gradio_interface.py
├── tiny-llama.json
└── vicuna_7b_qlora_stage1
```
## Start Training
Now let's generate the data and start training the model. This process will take a while since we are generating data from the model.
First make sure you have an instance of TGI running with the model you want to use for self-distillation.
```bash
model=HuggingFaceH4/zephyr-7b-beta
volume=/home/ubuntu/.cache/huggingface/hub/
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model
```
Now we can generate the data using the `create_data.py` script.
```bash
python create_data.py \
--input-filename ShareGPT_Vicuna_unfiltered/ShareGPT_V4.3_unfiltered_cleaned_split.json \
--output-filename zephyr_self_distill.json
```
At this point our terminal should look like this:
<div class="flex justify-center">
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/medusa-train-large.gif"
width="550"
/>
</div>
> Note: In the screenshot above we are only using the first 500 examples from the dataset to speed up the process; you should use a much larger dataset for training.
Now we can finally get to the fun part and start training the model!
Using `torchrun` we can easily launch the `medusa` training script with the `zephyr_self_distill.json` configuration file.
> NOTE: If you just self-distilled, you may still have the model running; make sure to stop it before starting the training so that all of the resources can be used for training.
```bash
WANDB_MODE=offline torchrun --nproc_per_node=4 medusa/train/train_legacy.py \
--model_name_or_path HuggingFaceH4/zephyr-7b-beta \
--data_path zephyr_self_distill.json \
--bf16 True \
--output_dir zephyr_out \
--num_train_epochs 5 \
--per_device_train_batch_size 4 \
--per_device_eval_batch_size 4 \
--gradient_accumulation_steps 4 \
--evaluation_strategy "no" \
--save_strategy "no" \
--learning_rate 1e-3 \
--weight_decay 0.0 \
--warmup_ratio 0.1 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--tf32 True \
--model_max_length 2048 \
--lazy_preprocess True \
--medusa_num_heads 3 \
--medusa_num_layers 1 \
--deepspeed deepspeed.json
```
<div class="flex justify-center">
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/medusa-train-heads-large.gif"
width="550"
/>
</div>
If successful, you should see output similar to the one below:
```bash
wandb: Run history:
wandb: train/epoch [sparkline]
wandb: train/global_step [sparkline]
wandb: train/learning_rate [sparkline]
wandb: train/loss [sparkline]
wandb: train/medusa0_loss [sparkline]
wandb: train/medusa0_top1 [sparkline]
wandb: train/medusa1_loss [sparkline]
wandb: train/medusa1_top1 [sparkline]
wandb: train/medusa2_loss [sparkline]
wandb: train/medusa2_top1 [sparkline]
wandb: train/total_flos ▁
wandb: train/train_loss ▁
wandb: train/train_runtime ▁
wandb: train/train_samples_per_second ▁
wandb: train/train_steps_per_second ▁
wandb:
wandb: Run summary:
wandb: train/epoch 2.0
wandb: train/global_step 16
wandb: train/learning_rate 0.0
wandb: train/loss 14.8906
wandb: train/medusa0_loss 4.25
wandb: train/medusa0_top1 0.28809
wandb: train/medusa1_loss 4.8125
wandb: train/medusa1_top1 0.22727
wandb: train/medusa2_loss 5.5
wandb: train/medusa2_top1 0.17293
wandb: train/total_flos 0.0
wandb: train/train_loss 23.98242
wandb: train/train_runtime 396.9266
wandb: train/train_samples_per_second 2.519
wandb: train/train_steps_per_second 0.04
```
Last but not least, don't forget to push this model to the Hugging Face Hub so you can use it in your projects.
```bash
python -m medusa.hf_utils \
--folder zephyr_out_medusa_mlp_zephyr-7b-beta_medusa_3_lr_0.001_layers_1 \
--repo drbh/zephyr_medusa_demo
```
Woo, we've successfully trained a Medusa model and pushed it to the Hugging Face Hub! π
|
text-generation-inference/docs/source/basic_tutorials/train_medusa.md/0
|
{
"file_path": "text-generation-inference/docs/source/basic_tutorials/train_medusa.md",
"repo_id": "text-generation-inference",
"token_count": 3478
}
| 226
|
# Using TGI with Intel Gaudi
Check out this [repository](https://github.com/huggingface/tgi-gaudi) to serve models with TGI on Gaudi and Gaudi2 with [Optimum Habana](https://huggingface.co/docs/optimum/habana/index).
|
text-generation-inference/docs/source/installation_gaudi.md/0
|
{
"file_path": "text-generation-inference/docs/source/installation_gaudi.md",
"repo_id": "text-generation-inference",
"token_count": 75
}
| 227
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 15,
"logprob": null,
"text": ","
},
{
"id": 1669,
"logprob": -5.4453125,
"text": " il"
},
{
"id": 11580,
"logprob": -2.3378906,
"text": " faut"
},
{
"id": 3913,
"logprob": -4.3320312,
"text": " tout"
},
{
"id": 39261,
"logprob": -2.9160156,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 408,
"logprob": -0.16687012,
"special": false,
"text": " que"
},
{
"id": 366,
"logprob": -1.5517578,
"special": false,
"text": " la"
},
{
"id": 8769,
"logprob": -0.16687012,
"special": false,
"text": " personne"
},
{
"id": 1479,
"logprob": -2.1035156,
"special": false,
"text": " qui"
},
{
"id": 143926,
"logprob": -2.8671875,
"special": false,
"text": " rΓ©alise"
},
{
"id": 578,
"logprob": 0.0,
"special": false,
"text": " le"
},
{
"id": 8138,
"logprob": -0.66748047,
"special": false,
"text": " projet"
},
{
"id": 795,
"logprob": -1.6279297,
"special": false,
"text": " ne"
},
{
"id": 9802,
"logprob": -0.47875977,
"special": false,
"text": " soit"
},
{
"id": 1230,
"logprob": 0.0,
"special": false,
"text": " pas"
}
],
"top_tokens": null
},
"generated_text": "Pour dΓ©guster un ortolan, il faut tout d'abord que la personne qui rΓ©alise le projet ne soit pas"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1204
}
| 228
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6132812,
"text": "af"
},
{
"id": 249,
"logprob": -6.5039062,
"text": "at"
},
{
"id": 1480,
"logprob": -8.078125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.3261719,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.048339844,
"text": " with"
},
{
"id": 26680,
"logprob": -4.0,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07556152,
"text": "aff"
},
{
"id": 255,
"logprob": -0.0067749023,
"text": "es"
},
{
"id": 23,
"logprob": -1.546875,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.734375,
"text": " most"
},
{
"id": 21735,
"logprob": -5.109375,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.09375,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1835938,
"text": " on"
},
{
"id": 248,
"logprob": -0.77685547,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3828125,
"text": " face"
},
{
"id": 275,
"logprob": -0.004432678,
"text": " of"
},
{
"id": 414,
"logprob": -1.9677734,
"text": " this"
},
{
"id": 6490,
"logprob": -2.046875,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.9179688,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2753906,
"text": "ira"
},
{
"id": 694,
"logprob": -0.6230469,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20874023,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5507812,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5664062,
"text": " all"
},
{
"id": 599,
"logprob": -2.7402344,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21948242,
"text": " animals"
},
{
"id": 362,
"logprob": -0.7675781,
"text": " are"
},
{
"id": 23981,
"logprob": -5.0,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.103637695,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6835938,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8173828,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23510742,
"text": " of"
},
{
"id": 248,
"logprob": -0.35473633,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24633789,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.02960205,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17333984,
"text": "."
},
{
"id": 193,
"logprob": -1.3935547,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.59375,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99365234,
"text": ","
},
{
"id": 29033,
"logprob": -2.2324219,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.10809326,
"text": "af"
},
{
"id": 249,
"logprob": -0.042663574,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024776459,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1015625,
"text": "\n"
},
{
"id": 50,
"logprob": -0.05709839,
"text": "G"
},
{
"id": 330,
"logprob": -0.13208008,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.0071487427,
"text": "af"
},
{
"id": 249,
"logprob": -0.008468628,
"text": "at"
},
{
"id": 1480,
"logprob": -0.00068998337,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074691772,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.8251953,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.3173828,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.23803711,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.56933594,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.61279297,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.41967773,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.0023403168,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5292969,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007904053,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json",
"repo_id": "text-generation-inference",
"token_count": 4604
}
| 229
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 806,
"logprob": -11.890625,
"text": "Wh"
},
{
"id": 1446,
"logprob": -3.6699219,
"text": "ats"
},
{
"id": 2921,
"logprob": -7.8203125,
"text": "Go"
},
{
"id": 468,
"logprob": -8.0703125,
"text": "og"
},
{
"id": 793,
"logprob": -2.1875,
"text": "les"
},
{
"id": 16332,
"logprob": -9.7109375,
"text": "DNS"
}
],
"seed": null,
"tokens": [
{
"id": 29946,
"logprob": -1.4765625,
"special": false,
"text": "4"
},
{
"id": 29906,
"logprob": -0.9199219,
"special": false,
"text": "2"
},
{
"id": 29889,
"logprob": 0.0,
"special": false,
"text": "."
},
{
"id": 29896,
"logprob": -1.1367188,
"special": false,
"text": "1"
},
{
"id": 29889,
"logprob": -1.4648438,
"special": false,
"text": "."
},
{
"id": 29896,
"logprob": -0.40722656,
"special": false,
"text": "1"
},
{
"id": 29889,
"logprob": -0.17419434,
"special": false,
"text": "."
},
{
"id": 29896,
"logprob": -0.20251465,
"special": false,
"text": "1"
},
{
"id": 29900,
"logprob": -1.5527344,
"special": false,
"text": "0"
},
{
"id": 29896,
"logprob": -1.3710938,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": "42.1.1.101"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json",
"repo_id": "text-generation-inference",
"token_count": 1292
}
| 230
|
{
"details": {
"finish_reason": "length",
"generated_tokens": 40,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -0.31347656,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.27441406,
"special": false,
"text": "\n"
},
{
"id": 28737,
"logprob": -2.2285156,
"special": false,
"text": "I"
},
      {
        "id": 28809,
        "logprob": -1.4677734,
        "special": false,
        "text": "’"
},
{
"id": 28719,
"logprob": -0.31762695,
"special": false,
"text": "m"
},
{
"id": 264,
"logprob": -1.6865234,
"special": false,
"text": " a"
},
{
"id": 1215,
"logprob": -3.2695312,
"special": false,
"text": " very"
},
{
"id": 20640,
"logprob": -3.1230469,
"special": false,
"text": " passionate"
},
{
"id": 1338,
"logprob": -0.48339844,
"special": false,
"text": " person"
},
{
"id": 28723,
"logprob": -0.9970703,
"special": false,
"text": "."
},
{
"id": 315,
"logprob": -0.5498047,
"special": false,
"text": " I"
},
      {
        "id": 28809,
        "logprob": -1.1923828,
        "special": false,
        "text": "’"
},
{
"id": 28719,
"logprob": -0.080444336,
"special": false,
"text": "m"
},
{
"id": 1215,
"logprob": -1.8271484,
"special": false,
"text": " very"
},
{
"id": 12215,
"logprob": -2.8847656,
"special": false,
"text": " driven"
},
{
"id": 28723,
"logprob": -1.0927734,
"special": false,
"text": "."
},
{
"id": 315,
"logprob": -0.4584961,
"special": false,
"text": " I"
},
      {
        "id": 28809,
        "logprob": -0.5019531,
        "special": false,
        "text": "’"
},
{
"id": 28719,
"logprob": -0.030715942,
"special": false,
"text": "m"
},
{
"id": 1215,
"logprob": -0.96972656,
"special": false,
"text": " very"
},
{
"id": 7798,
"logprob": -2.8847656,
"special": false,
"text": " determined"
},
{
"id": 28723,
"logprob": -0.27319336,
"special": false,
"text": "."
},
{
"id": 13,
"logprob": -0.56396484,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.011016846,
"special": false,
"text": "\n"
},
{
"id": 3195,
"logprob": -0.7163086,
"special": false,
"text": "What"
},
{
"id": 349,
"logprob": -1.1611328,
"special": false,
"text": " is"
},
{
"id": 574,
"logprob": -0.515625,
"special": false,
"text": " your"
},
{
"id": 6656,
"logprob": -1.0253906,
"special": false,
"text": " favorite"
},
{
"id": 1970,
"logprob": -2.1738281,
"special": false,
"text": " thing"
},
{
"id": 684,
"logprob": -0.48364258,
"special": false,
"text": " about"
},
{
"id": 1250,
"logprob": -1.8876953,
"special": false,
"text": " being"
},
{
"id": 264,
"logprob": -0.41967773,
"special": false,
"text": " a"
},
{
"id": 8626,
"logprob": -2.9160156,
"special": false,
"text": " teacher"
},
{
"id": 28804,
"logprob": -0.11920166,
"special": false,
"text": "?"
},
{
"id": 13,
"logprob": -0.023727417,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.010848999,
"special": false,
"text": "\n"
},
{
"id": 28737,
"logprob": -1.0566406,
"special": false,
"text": "I"
},
{
"id": 2016,
"logprob": -0.7163086,
"special": false,
"text": " love"
},
{
"id": 272,
"logprob": -1.9169922,
"special": false,
"text": " the"
},
{
"id": 1639,
"logprob": -2.03125,
"special": false,
"text": " fact"
}
]
  },
  "generated_text": "\n\nI’m a very passionate person. I’m very driven. I’m very determined.\n\nWhat is your favorite thing about being a teacher?\n\nI love the fact"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_customer_support_adapter.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_customer_support_adapter.json",
"repo_id": "text-generation-inference",
"token_count": 3126
}
| 231
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2099609,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2451172,
"special": false,
"text": " "
},
      {
        "id": 1956,
        "logprob": -0.3322754,
        "special": false,
        "text": "°"
},
{
"id": 254,
"logprob": -0.19213867,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030151367,
"special": true,
"text": "</s>"
}
]
  },
  "generated_text": "-196 °C"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json",
"repo_id": "text-generation-inference",
"token_count": 680
}
| 232
|
import pytest
@pytest.fixture(scope="module")
def flash_gemma2_handle(launcher):
with launcher("google/gemma-2-9b-it", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_gemma2(flash_gemma2_handle):
await flash_gemma2_handle.health(300)
return flash_gemma2_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma2(flash_gemma2, response_snapshot):
response = await flash_gemma2.generate(
"<start_of_turn>user:\nWrite a poem to help me remember the first 10 elements on the periodic table, giving each element its own line.<end_of_turn>\n<start_of_turn>model:\n",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.generated_text == "**Hydrogen**, light and free,\n**He"
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma2_load(flash_gemma2, generate_load, response_snapshot):
responses = await generate_load(
flash_gemma2,
"<start_of_turn>user:\nWrite a poem to help me remember the first 10 elements on the periodic table, giving each element its own line.<end_of_turn>\n<start_of_turn>model:\n",
max_new_tokens=10,
n=4,
)
assert responses[0].generated_text == "**Hydrogen**, light and free,\n**He"
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_flash_gemma2.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_flash_gemma2.py",
"repo_id": "text-generation-inference",
"token_count": 602
}
| 233
|
import pytest
@pytest.fixture(scope="module")
def flash_qwen2_handle(launcher):
with launcher("Qwen/Qwen1.5-0.5B") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_qwen2(flash_qwen2_handle):
await flash_qwen2_handle.health(300)
return flash_qwen2_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_qwen2(flash_qwen2, response_snapshot):
response = await flash_qwen2.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response.generated_text == "\n# Create a request\nrequest = requests.get"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_qwen2_all_params(flash_qwen2, response_snapshot):
response = await flash_qwen2.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_qwen2_load(flash_qwen2, generate_load, response_snapshot):
responses = await generate_load(flash_qwen2, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses[0].generated_text == "\n# Create a request\nrequest = requests.get"
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_flash_qwen2.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_flash_qwen2.py",
"repo_id": "text-generation-inference",
"token_count": 747
}
| 234
|
import pytest
@pytest.fixture(scope="module")
def opt_sharded_handle(launcher):
with launcher("facebook/opt-6.7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def opt_sharded(opt_sharded_handle):
await opt_sharded_handle.health(300)
return opt_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_opt(opt_sharded):
pass
|
text-generation-inference/integration-tests/models/test_opt.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_opt.py",
"repo_id": "text-generation-inference",
"token_count": 160
}
| 235
|
{
nix-filter,
buildPythonPackage,
poetry-core,
mypy-protobuf,
awq-inference-engine,
causal-conv1d,
eetq,
einops,
exllamav2,
fbgemm-gpu,
flashinfer,
flash-attn,
flash-attn-layer-norm,
flash-attn-rotary,
grpc-interceptor,
grpcio-reflection,
grpcio-status,
grpcio-tools,
hf-transfer,
loguru,
mamba-ssm,
marlin-kernels,
opentelemetry-api,
opentelemetry-exporter-otlp,
opentelemetry-instrumentation-grpc,
opentelemetry-semantic-conventions,
peft,
safetensors,
tokenizers,
torch,
sentencepiece,
transformers,
typer,
vllm,
}:
let
filter = nix-filter.lib;
in
buildPythonPackage {
name = "text-generation-server";
src = filter {
root = ../.;
include = with filter; [
isDirectory
(and (inDirectory "server") (or_ (matchExt "py") (matchExt "pyi")))
"server/pyproject.toml"
(and (inDirectory "proto/v3") (matchExt "proto"))
];
};
pyproject = true;
build-system = [ poetry-core ];
nativeBuildInputs = [ mypy-protobuf ];
pythonRelaxDeps = [
"einops"
"huggingface-hub"
"loguru"
"opentelemetry-instrumentation-grpc"
"sentencepiece"
"typer"
];
pythonRemoveDeps = [ "scipy" ];
dependencies = [
awq-inference-engine
eetq
causal-conv1d
einops
exllamav2
fbgemm-gpu
flashinfer
flash-attn
flash-attn-layer-norm
flash-attn-rotary
grpc-interceptor
grpcio-reflection
grpcio-status
grpcio-tools
hf-transfer
loguru
mamba-ssm
marlin-kernels
opentelemetry-api
opentelemetry-exporter-otlp
opentelemetry-instrumentation-grpc
opentelemetry-semantic-conventions
peft
safetensors
sentencepiece
tokenizers
transformers
typer
vllm
];
prePatch = ''
python -m grpc_tools.protoc -Iproto/v3 --python_out=server/text_generation_server/pb \
--grpc_python_out=server/text_generation_server/pb --mypy_out=server/text_generation_server/pb proto/v3/generate.proto
find server/text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
touch server/text_generation_server/pb/__init__.py
cd server
'';
}
|
text-generation-inference/nix/server.nix/0
|
{
"file_path": "text-generation-inference/nix/server.nix",
"repo_id": "text-generation-inference",
"token_count": 980
}
| 236
|
use axum::http::HeaderValue;
use clap::Parser;
use clap::Subcommand;
use hf_hub::api::tokio::{Api, ApiBuilder, ApiRepo};
use hf_hub::{Cache, Repo, RepoType};
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use std::fs::File;
use std::io::BufReader;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::{Path, PathBuf};
use text_generation_router::config::Config;
use text_generation_router::usage_stats;
use text_generation_router::{
server, HubModelInfo, HubPreprocessorConfig, HubProcessorConfig, HubTokenizerConfig,
};
use thiserror::Error;
use tokenizers::{processors::template::TemplateProcessing, Tokenizer};
use tower_http::cors::AllowOrigin;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{filter::LevelFilter, EnvFilter, Layer};
/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
#[command(subcommand)]
command: Option<Commands>,
#[clap(default_value = "128", long, env)]
max_concurrent_requests: usize,
#[clap(default_value = "2", long, env)]
max_best_of: usize,
#[clap(default_value = "4", long, env)]
max_stop_sequences: usize,
#[clap(default_value = "5", long, env)]
max_top_n_tokens: u32,
#[clap(default_value = "1024", long, env)]
max_input_tokens: usize,
#[clap(default_value = "2048", long, env)]
max_total_tokens: usize,
#[clap(default_value = "1.2", long, env)]
waiting_served_ratio: f32,
#[clap(default_value = "4096", long, env)]
max_batch_prefill_tokens: u32,
#[clap(long, env)]
max_batch_total_tokens: Option<u32>,
#[clap(default_value = "20", long, env)]
max_waiting_tokens: usize,
#[clap(long, env)]
max_batch_size: Option<usize>,
#[clap(default_value = "0.0.0.0", long, env)]
hostname: String,
#[clap(default_value = "3000", long, short, env)]
port: u16,
#[clap(default_value = "/tmp/text-generation-server-0", long, env)]
master_shard_uds_path: String,
#[clap(default_value = "bigscience/bloom", long, env)]
tokenizer_name: String,
#[clap(long, env)]
tokenizer_config_path: Option<String>,
#[clap(long, env)]
revision: Option<String>,
#[clap(default_value = "2", long, env)]
validation_workers: usize,
#[clap(long, env)]
json_output: bool,
#[clap(long, env)]
otlp_endpoint: Option<String>,
#[clap(default_value = "text-generation-inference.router", long, env)]
otlp_service_name: String,
#[clap(long, env)]
cors_allow_origin: Option<Vec<String>>,
#[clap(long, env)]
api_key: Option<String>,
#[clap(long, env)]
ngrok: bool,
#[clap(long, env)]
ngrok_authtoken: Option<String>,
#[clap(long, env)]
ngrok_edge: Option<String>,
#[clap(long, env, default_value_t = false)]
messages_api_enabled: bool,
#[clap(long, env, default_value_t = false)]
disable_grammar_support: bool,
#[clap(default_value = "4", long, env)]
max_client_batch_size: usize,
#[clap(long, env, default_value_t)]
disable_usage_stats: bool,
#[clap(long, env, default_value_t)]
disable_crash_reports: bool,
}
#[derive(Debug, Subcommand)]
enum Commands {
PrintSchema,
}
#[tokio::main]
async fn main() -> Result<(), RouterError> {
let args = Args::parse();
// Pattern match configuration
let Args {
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
max_batch_size,
hostname,
port,
master_shard_uds_path,
tokenizer_name,
tokenizer_config_path,
revision,
validation_workers,
json_output,
otlp_endpoint,
otlp_service_name,
cors_allow_origin,
api_key,
ngrok,
ngrok_authtoken,
ngrok_edge,
messages_api_enabled,
disable_grammar_support,
max_client_batch_size,
disable_usage_stats,
disable_crash_reports,
command,
} = args;
let print_schema_command = match command {
Some(Commands::PrintSchema) => true,
None => {
// only init logging if we are not running the print schema command
init_logging(otlp_endpoint, otlp_service_name, json_output);
false
}
};
// Validate args
if max_input_tokens >= max_total_tokens {
return Err(RouterError::ArgumentValidation(
"`max_input_tokens` must be < `max_total_tokens`".to_string(),
));
}
if max_input_tokens as u32 > max_batch_prefill_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_tokens`. Given: {max_batch_prefill_tokens} and {max_input_tokens}")));
}
if validation_workers == 0 {
return Err(RouterError::ArgumentValidation(
"`validation_workers` must be > 0".to_string(),
));
}
if let Some(ref max_batch_total_tokens) = max_batch_total_tokens {
if max_batch_prefill_tokens > *max_batch_total_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}")));
}
if max_total_tokens as u32 > *max_batch_total_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}")));
}
}
// CORS allowed origins
// map to go inside the option and then map to parse from String to HeaderValue
// Finally, convert to AllowOrigin
let cors_allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| {
AllowOrigin::list(
cors_allow_origin
.iter()
.map(|origin| origin.parse::<HeaderValue>().unwrap()),
)
});
// Parse Huggingface hub token
let authorization_token = std::env::var("HF_TOKEN")
.or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
.ok();
// Tokenizer instance
// This will only be used to validate payloads
let local_path = Path::new(&tokenizer_name);
// Shared API builder initialization
let api_builder = || {
let mut builder = ApiBuilder::new()
.with_progress(false)
.with_token(authorization_token);
if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") {
builder = builder.with_cache_dir(cache_dir.into());
}
builder
};
// Decide if we need to use the API based on the revision and local path
let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir();
// Initialize API if needed
#[derive(Clone)]
enum Type {
Api(Api),
Cache(Cache),
None,
}
let api = if use_api {
if std::env::var("HF_HUB_OFFLINE") == Ok("1".to_string()) {
let cache = std::env::var("HUGGINGFACE_HUB_CACHE")
.map_err(|_| ())
.map(|cache_dir| Cache::new(cache_dir.into()))
.unwrap_or_else(|_| Cache::default());
tracing::warn!("Offline mode active using cache defaults");
Type::Cache(cache)
} else {
tracing::info!("Using the Hugging Face API");
match api_builder().build() {
Ok(api) => Type::Api(api),
Err(_) => {
tracing::warn!("Unable to build the Hugging Face API");
Type::None
}
}
}
} else {
Type::None
};
// Load tokenizer and model info
let (
tokenizer_filename,
config_filename,
tokenizer_config_filename,
preprocessor_config_filename,
processor_config_filename,
model_info,
) = match api {
Type::None => (
Some(local_path.join("tokenizer.json")),
Some(local_path.join("config.json")),
Some(local_path.join("tokenizer_config.json")),
Some(local_path.join("preprocessor_config.json")),
Some(local_path.join("processor_config.json")),
None,
),
Type::Api(api) => {
let api_repo = api.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.clone().unwrap_or_else(|| "main".to_string()),
));
let tokenizer_filename = match api_repo.get("tokenizer.json").await {
Ok(tokenizer_filename) => Some(tokenizer_filename),
Err(_) => get_base_tokenizer(&api, &api_repo).await,
};
let config_filename = api_repo.get("config.json").await.ok();
let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok();
let preprocessor_config_filename = api_repo.get("preprocessor_config.json").await.ok();
let processor_config_filename = api_repo.get("processor_config.json").await.ok();
let model_info = if let Some(model_info) = get_model_info(&api_repo).await {
Some(model_info)
} else {
tracing::warn!("Could not retrieve model info from the Hugging Face hub.");
None
};
(
tokenizer_filename,
config_filename,
tokenizer_config_filename,
preprocessor_config_filename,
processor_config_filename,
model_info,
)
}
Type::Cache(cache) => {
let repo = cache.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.clone().unwrap_or_else(|| "main".to_string()),
));
(
repo.get("tokenizer.json"),
repo.get("config.json"),
repo.get("tokenizer_config.json"),
repo.get("preprocessor_config.json"),
repo.get("processor_config.json"),
None,
)
}
};
let config: Option<Config> = config_filename.and_then(|filename| {
std::fs::read_to_string(filename)
.ok()
.as_ref()
.and_then(|c| {
let config: Result<Config, _> = serde_json::from_str(c);
if let Err(err) = &config {
tracing::warn!("Could not parse config {err:?}");
}
config.ok()
})
});
let model_info = model_info.unwrap_or_else(|| HubModelInfo {
model_id: tokenizer_name.to_string(),
sha: None,
pipeline_tag: None,
});
// Read the JSON contents of the file as an instance of 'HubTokenizerConfig'.
let tokenizer_config: Option<HubTokenizerConfig> = if let Some(filename) = tokenizer_config_path
{
HubTokenizerConfig::from_file(filename)
} else {
tokenizer_config_filename.and_then(HubTokenizerConfig::from_file)
};
let tokenizer_config = tokenizer_config.unwrap_or_else(|| {
tracing::warn!("Could not find tokenizer config locally and no API specified");
HubTokenizerConfig::default()
});
let tokenizer_class = tokenizer_config.tokenizer_class.clone();
let tokenizer: Option<Tokenizer> = tokenizer_filename.and_then(|filename| {
let mut tokenizer = Tokenizer::from_file(filename).ok();
if let Some(tokenizer) = &mut tokenizer {
if let Some(class) = &tokenizer_config.tokenizer_class {
if class == "LlamaTokenizer" || class == "LlamaTokenizerFast"{
if let Ok(post_processor) = create_post_processor(tokenizer, &tokenizer_config) {
tracing::info!("Overriding LlamaTokenizer with TemplateProcessing to follow python override defined in https://github.com/huggingface/transformers/blob/4aa17d00690b7f82c95bb2949ea57e22c35b4336/src/transformers/models/llama/tokenization_llama_fast.py#L203-L205");
tokenizer.with_post_processor(post_processor);
}
}
}
}
tokenizer
});
let preprocessor_config =
preprocessor_config_filename.and_then(HubPreprocessorConfig::from_file);
let processor_config = processor_config_filename
.and_then(HubProcessorConfig::from_file)
.unwrap_or_default();
tracing::info!("Using config {config:?}");
if tokenizer.is_none() {
tracing::warn!("Could not find a fast tokenizer implementation for {tokenizer_name}");
tracing::warn!("Rust input length validation and truncation is disabled");
}
// if pipeline-tag == text-generation we default to return_full_text = true
let compat_return_full_text = match &model_info.pipeline_tag {
None => {
tracing::warn!("no pipeline tag found for model {tokenizer_name}");
true
}
Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation",
};
// Determine the server port based on the feature and environment variable.
let port = if cfg!(feature = "google") {
std::env::var("AIP_HTTP_PORT")
.map(|aip_http_port| aip_http_port.parse::<u16>().unwrap_or(port))
.unwrap_or(port)
} else {
port
};
let addr = match hostname.parse() {
Ok(ip) => SocketAddr::new(ip, port),
Err(_) => {
tracing::warn!("Invalid hostname, defaulting to 0.0.0.0");
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port)
}
};
// Only send usage stats when TGI is run in container and the function returns Some
let is_container = matches!(usage_stats::is_container(), Ok(true));
let user_agent = if !disable_usage_stats && is_container {
let reduced_args = usage_stats::Args::new(
config.clone(),
tokenizer_class,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
max_batch_size,
revision,
validation_workers,
messages_api_enabled,
disable_grammar_support,
max_client_batch_size,
disable_usage_stats,
disable_crash_reports,
);
Some(usage_stats::UserAgent::new(reduced_args))
} else {
None
};
if let Some(ref ua) = user_agent {
let start_event =
usage_stats::UsageStatsEvent::new(ua.clone(), usage_stats::EventType::Start, None);
tokio::spawn(async move {
start_event.send().await;
});
};
// Run server
let result = server::run(
master_shard_uds_path,
model_info,
compat_return_full_text,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
max_batch_size,
tokenizer,
config,
validation_workers,
addr,
cors_allow_origin,
api_key,
ngrok,
ngrok_authtoken,
ngrok_edge,
tokenizer_config,
preprocessor_config,
processor_config,
messages_api_enabled,
disable_grammar_support,
max_client_batch_size,
print_schema_command,
)
.await;
match result {
Ok(_) => {
if let Some(ref ua) = user_agent {
let stop_event = usage_stats::UsageStatsEvent::new(
ua.clone(),
usage_stats::EventType::Stop,
None,
);
stop_event.send().await;
};
Ok(())
}
Err(e) => {
if let Some(ref ua) = user_agent {
if !disable_crash_reports {
let error_event = usage_stats::UsageStatsEvent::new(
ua.clone(),
usage_stats::EventType::Error,
Some(e.to_string()),
);
error_event.send().await;
} else {
                    let unknown_error_event = usage_stats::UsageStatsEvent::new(
                        ua.clone(),
                        usage_stats::EventType::Error,
                        Some("unknown_error".to_string()),
                    );
                    unknown_error_event.send().await;
}
};
Err(RouterError::WebServer(e))
}
}
}
/// Init logging using env variables LOG_LEVEL and LOG_FORMAT:
/// - otlp_endpoint is an optional URL to an Open Telemetry collector
/// - otlp_service_name service name to appear in APM
/// - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (default to INFO)
/// - LOG_FORMAT may be TEXT or JSON (default to TEXT)
/// - LOG_COLORIZE may be "false" or "true" (defaults to "true" on ANSI-supported platforms)
fn init_logging(otlp_endpoint: Option<String>, otlp_service_name: String, json_output: bool) {
let mut layers = Vec::new();
// STDOUT/STDERR layer
    let ansi = std::env::var("LOG_COLORIZE") != Ok("false".to_string());
let fmt_layer = tracing_subscriber::fmt::layer()
.with_file(true)
.with_ansi(ansi)
.with_line_number(true);
let fmt_layer = match json_output {
true => fmt_layer.json().flatten_event(true).boxed(),
false => fmt_layer.boxed(),
};
layers.push(fmt_layer);
// OpenTelemetry tracing layer
if let Some(otlp_endpoint) = otlp_endpoint {
global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(otlp_endpoint),
)
.with_trace_config(
trace::config()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
otlp_service_name,
)]))
.with_sampler(Sampler::AlwaysOn),
)
.install_batch(opentelemetry::runtime::Tokio);
if let Ok(tracer) = tracer {
layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
init_tracing_opentelemetry::init_propagator().unwrap();
};
}
// Filter events with LOG_LEVEL
let varname = "LOG_LEVEL";
let env_filter = if let Ok(log_level) = std::env::var(varname) {
        // Override to avoid simple logs being spammed with tokio-level information
let log_level = match &log_level[..] {
"warn" => "text_generation_launcher=warn,text_generation_router=warn",
"info" => "text_generation_launcher=info,text_generation_router=info",
"debug" => "text_generation_launcher=debug,text_generation_router=debug",
log_level => log_level,
};
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.parse_lossy(log_level)
} else {
EnvFilter::new("info")
};
tracing_subscriber::registry()
.with(env_filter)
.with(layers)
.init();
}
/// get model info from the Huggingface Hub
pub async fn get_model_info(api: &ApiRepo) -> Option<HubModelInfo> {
let response = api.info_request().send().await.ok()?;
if response.status().is_success() {
let hub_model_info: HubModelInfo =
serde_json::from_str(&response.text().await.ok()?).ok()?;
if let Some(sha) = &hub_model_info.sha {
tracing::info!(
"Serving revision {sha} of model {}",
hub_model_info.model_id
);
}
Some(hub_model_info)
} else {
None
}
}
/// get base tokenizer
pub async fn get_base_tokenizer(api: &Api, api_repo: &ApiRepo) -> Option<PathBuf> {
let config_filename = api_repo.get("config.json").await.ok()?;
// Open the file in read-only mode with buffer.
let file = File::open(config_filename).ok()?;
let reader = BufReader::new(file);
    // Read the JSON contents of the file as a generic `serde_json::Value`.
let config: serde_json::Value = serde_json::from_reader(reader).ok()?;
if let Some(serde_json::Value::String(base_model_id)) = config.get("base_model_name_or_path") {
let api_base_repo = api.repo(Repo::with_revision(
base_model_id.to_string(),
RepoType::Model,
"main".to_string(),
));
api_base_repo.get("tokenizer.json").await.ok()
} else {
None
}
}
/// get tokenizer_config from the Huggingface Hub
pub async fn get_tokenizer_config(api_repo: &ApiRepo) -> Option<HubTokenizerConfig> {
let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok()?;
// Open the file in read-only mode with buffer.
let file = File::open(tokenizer_config_filename).ok()?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of 'HubTokenizerConfig'.
let tokenizer_config: HubTokenizerConfig = serde_json::from_reader(reader)
.map_err(|e| {
tracing::warn!("Unable to parse tokenizer config: {}", e);
e
})
.ok()?;
Some(tokenizer_config)
}
/// Create a post_processor for the LlamaTokenizer
pub fn create_post_processor(
tokenizer: &Tokenizer,
tokenizer_config: &HubTokenizerConfig,
) -> Result<TemplateProcessing, tokenizers::processors::template::TemplateProcessingBuilderError> {
let add_bos_token = tokenizer_config.add_bos_token.unwrap_or(true);
let add_eos_token = tokenizer_config.add_eos_token.unwrap_or(false);
let bos_token = tokenizer_config.bos_token.as_ref();
let eos_token = tokenizer_config.eos_token.as_ref();
if add_bos_token && bos_token.is_none() {
panic!("add_bos_token = true but bos_token is None");
}
if add_eos_token && eos_token.is_none() {
panic!("add_eos_token = true but eos_token is None");
}
let mut single = Vec::new();
let mut pair = Vec::new();
let mut special_tokens = Vec::new();
if add_bos_token {
if let Some(bos) = bos_token {
let bos_token_id = tokenizer
.token_to_id(bos.as_str())
.expect("Should have found the bos token id");
special_tokens.push((bos.as_str(), bos_token_id));
single.push(format!("{}:0", bos.as_str()));
pair.push(format!("{}:0", bos.as_str()));
}
}
single.push("$A:0".to_string());
pair.push("$A:0".to_string());
if add_eos_token {
if let Some(eos) = eos_token {
let eos_token_id = tokenizer
.token_to_id(eos.as_str())
.expect("Should have found the eos token id");
special_tokens.push((eos.as_str(), eos_token_id));
single.push(format!("{}:0", eos.as_str()));
pair.push(format!("{}:0", eos.as_str()));
}
}
if add_bos_token {
if let Some(bos) = bos_token {
pair.push(format!("{}:1", bos.as_str()));
}
}
pair.push("$B:1".to_string());
if add_eos_token {
if let Some(eos) = eos_token {
pair.push(format!("{}:1", eos.as_str()));
}
}
let post_processor = TemplateProcessing::builder()
.try_single(single)?
.try_pair(pair)?
.special_tokens(special_tokens)
.build()?;
Ok(post_processor)
}
#[derive(Debug, Error)]
enum RouterError {
#[error("Argument validation error: {0}")]
ArgumentValidation(String),
#[error("WebServer error: {0}")]
WebServer(#[from] server::WebServerError),
#[error("Tokio runtime failed to start: {0}")]
Tokio(#[from] std::io::Error),
}
#[cfg(test)]
mod tests {
use super::*;
use text_generation_router::TokenizerConfigToken;
#[test]
fn test_create_post_processor() {
let tokenizer_config = HubTokenizerConfig {
add_bos_token: None,
add_eos_token: None,
bos_token: Some(TokenizerConfigToken::String("<s>".to_string())),
eos_token: Some(TokenizerConfigToken::String("</s>".to_string())),
chat_template: None,
tokenizer_class: None,
completion_template: None,
};
let tokenizer =
Tokenizer::from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", None).unwrap();
let post_processor = create_post_processor(&tokenizer, &tokenizer_config).unwrap();
let expected = TemplateProcessing::builder()
.try_single("<s>:0 $A:0")
.unwrap()
.try_pair("<s>:0 $A:0 <s>:1 $B:1")
.unwrap()
.special_tokens(vec![("<s>".to_string(), 1)])
.build()
.unwrap();
assert_eq!(post_processor, expected);
}
}
|
text-generation-inference/router/src/main.rs.back/0
|
{
"file_path": "text-generation-inference/router/src/main.rs.back",
"repo_id": "text-generation-inference",
"token_count": 12333
}
| 237
|
selective_scan_commit := 2a3704fd47ba817b415627b06fd796b971fdc137
causal-conv1d:
rm -rf causal-conv1d
git clone https://github.com/Dao-AILab/causal-conv1d.git
build-causal-conv1d: causal-conv1d
cd causal-conv1d/ && git checkout v1.1.1 # known latest working version tag
cd causal-conv1d/ && CAUSAL_CONV1D_FORCE_BUILD=TRUE python setup.py build
install-causal-conv1d: build-causal-conv1d
pip uninstall causal-conv1d -y || true
cd causal-conv1d/ && pip install .
# selective-scan depends on causal-conv1d
selective-scan:
rm -rf mamba
git clone https://github.com/state-spaces/mamba.git mamba
build-selective-scan: selective-scan
cd mamba/ && git fetch && git checkout $(selective_scan_commit)
cd mamba && python setup.py build
install-selective-scan: install-causal-conv1d build-selective-scan
pip uninstall selective-scan-cuda -y || true
cd mamba && pip install .
build-all: build-causal-conv1d build-selective-scan
|
text-generation-inference/server/Makefile-selective-scan/0
|
{
"file_path": "text-generation-inference/server/Makefile-selective-scan",
"repo_id": "text-generation-inference",
"token_count": 351
}
| 238
|
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _hip_compat_cuh
#define _hip_compat_cuh
// Workaround for a bug in hipamd, backported from upstream, this is fixed in ROCm 5.6.
__device__ __forceinline__ __half __compat_hrcp(__half x) {
return __half_raw{
static_cast<_Float16>(__builtin_amdgcn_rcph(static_cast<__half_raw>(x).data))};
}
__device__ __forceinline__ __half2 __compat_h2rcp(__half2 x) {
return _Float16_2{
_Float16_2{static_cast<_Float16>(1.0f),
static_cast<_Float16>(1.0f)} / x.data};
}
#define hrcp __compat_hrcp
#define h2rcp __compat_h2rcp
// Automatic conversion of hipblasHgemm doesn't convert half to hipblasHalf.
__host__ __forceinline__ hipblasStatus_t __compat_hipblasHgemm(hipblasHandle_t handle,
hipblasOperation_t transA,
hipblasOperation_t transB,
int m,
int n,
int k,
const half* alpha,
const half* AP,
int lda,
const half* BP,
int ldb,
const half* beta,
half* CP,
int ldc) {
return hipblasHgemm(handle, transA, transB, m, n, k,
reinterpret_cast<const hipblasHalf *>(alpha),
reinterpret_cast<const hipblasHalf *>(AP), lda,
reinterpret_cast<const hipblasHalf *>(BP), ldb,
reinterpret_cast<const hipblasHalf *>(beta),
reinterpret_cast<hipblasHalf *>(CP), ldc);
}
#define hipblasHgemm __compat_hipblasHgemm
// Previous version of PyTorch were converting to rocBLAS instead of hipBLAS.
#define rocblas_handle hipblasHandle_t
#define rocblas_operation_none HIPBLAS_OP_N
#define rocblas_get_stream hipblasGetStream
#define rocblas_set_stream hipblasSetStream
#define rocblas_hgemm __compat_hipblasHgemm
#endif
|
text-generation-inference/server/exllama_kernels/exllama_kernels/hip_compat.cuh/0
|
{
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/hip_compat.cuh",
"repo_id": "text-generation-inference",
"token_count": 1710
}
| 239
|
#ifndef _qdq_3_cuh
#define _qdq_3_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_3BIT == 1
// Permutation:
//
// v9997775 55333111 u8886664 44222000 (u, v lsb)
// vjjjhhhf ffdddbbb uiiiggge eecccaaa
// vtttrrrp ppnnnlll usssqqqo oommmkkk
__forceinline__ __device__ void shuffle_3bit_32
(
uint32_t* q,
int stride
)
{
uint32_t qa = q[0 * stride];
uint32_t qb = q[1 * stride];
uint32_t qc = q[2 * stride];
// qa: aa999888 77766655 54443332 22111000
// qb: lkkkjjji iihhhggg fffeeedd dcccbbba
// qc: vvvuuutt tsssrrrq qqpppooo nnnmmmll
uint32_t qd = qc >> 26;
qc <<= 4;
qc |= qb >> 28;
qb <<= 2;
qb |= qa >> 30;
// qa: ..999888 77766655 54443332 22111000
// qb: ..jjjiii hhhgggff feeedddc ccbbbaaa
// qc: ..tttsss rrrqqqpp pooonnnm mmlllkkk
// qd: vvvuuu
uint32_t za = 0;
uint32_t zb = 0;
uint32_t zc = 0;
for (int i = 0; i < 5; i++) { uint32_t t0 = qa & 0x07; uint32_t t1 = (qa & 0x38) >> 3; qa >>= 6; za |= (t0 << (i * 3)); za |= (t1 << (i * 3 + 16)); }
for (int i = 0; i < 5; i++) { uint32_t t0 = qb & 0x07; uint32_t t1 = (qb & 0x38) >> 3; qb >>= 6; zb |= (t0 << (i * 3)); zb |= (t1 << (i * 3 + 16)); }
for (int i = 0; i < 5; i++) { uint32_t t0 = qc & 0x07; uint32_t t1 = (qc & 0x38) >> 3; qc >>= 6; zc |= (t0 << (i * 3)); zc |= (t1 << (i * 3 + 16)); }
// za: 9997775 55333111 8886664 44222000
// zb: jjjhhhf ffdddbbb iiiggge eecccaaa
// zc: tttrrrp ppnnnlll sssqqqo oommmkkk
// qd: vvvuuu
za |= ((qd & 0x01) >> 0) << 15;
zb |= ((qd & 0x02) >> 1) << 15;
zc |= ((qd & 0x04) >> 2) << 15;
za |= ((qd & 0x08) >> 3) << 31;
zb |= ((qd & 0x10) >> 4) << 31;
zc |= ((qd & 0x20) >> 5) << 31;
// za: v9997775 55333111 u8886664 44222000 (u, v lsb)
// zb: vjjjhhhf ffdddbbb uiiiggge eecccaaa
// zc: vtttrrrp ppnnnlll usssqqqo oommmkkk
q[0 * stride] = za;
q[1 * stride] = zb;
q[2 * stride] = zc;
}
__forceinline__ __device__ void dequant_3bit_32
(
const uint32_t q_0,
const uint32_t q_1,
const uint32_t q_2,
half2 (&dq)[16],
int stride
)
{
const uint32_t c0 = 0x64006400;
const half y8_ = __float2half_rn(1.0f / 8.0f);
const half y64_ = __float2half_rn(1.0f / 64.0f);
const half2 y8 = __halves2half2(y8_, y8_);
const half2 y64 = __halves2half2(y64_, y64_);
const half z1_ = __float2half_rn(-1024.0f - 4.0f);
const half z8_ = __float2half_rn(-1024.0f / 8.0f - 4.0f);
const half z64_ = __float2half_rn(-1024.0f / 64.0f - 4.0f);
const half2 z1 = __halves2half2(z1_, z1_);
const half2 z8 = __halves2half2(z8_, z8_);
const half2 z64 = __halves2half2(z64_, z64_);
uint32_t qa = q_0;
uint32_t qb = q_1;
uint32_t qc = q_2;
half2_uint32 q0((qa & 0x00070007) | c0); // half2(q[ 0], q[ 1]) + 1024
half2_uint32 q1((qa & 0x00380038) | c0); // half2(q[ 2], q[ 3]) * 8 + 1024
qa >>= 6;
half2_uint32 q2((qa & 0x00070007) | c0); // half2(q[ 4], q[ 5]) + 1024
half2_uint32 q3((qa & 0x00380038) | c0); // half2(q[ 6], q[ 7]) * 8 + 1024
half2_uint32 q4((qa & 0x01c001c0) | c0); // half2(q[ 8], q[ 9]) * 64 + 1024
qa >>= 9;
qa &= 0x00010001;
half2_uint32 q5((qb & 0x00070007) | c0); // half2(q[10], q[11]) + 1024
half2_uint32 q6((qb & 0x00380038) | c0); // half2(q[12], q[13]) * 8 + 1024
qb >>= 6;
half2_uint32 q7((qb & 0x00070007) | c0); // half2(q[14], q[15]) + 1024
half2_uint32 q8((qb & 0x00380038) | c0); // half2(q[16], q[17]) * 8 + 1024
half2_uint32 q9((qb & 0x01c001c0) | c0); // half2(q[18], q[19]) * 64 + 1024
qb >>= 8;
qb &= 0x00020002;
half2_uint32 q10((qc & 0x00070007) | c0); // half2(q[20], q[21]) + 1024
half2_uint32 q11((qc & 0x00380038) | c0); // half2(q[22], q[23]) * 8 + 1024
qc >>= 6;
half2_uint32 q12((qc & 0x00070007) | c0); // half2(q[24], q[25]) + 1024
half2_uint32 q13((qc & 0x00380038) | c0); // half2(q[26], q[27]) * 8 + 1024
half2_uint32 q14((qc & 0x01c001c0) | c0); // half2(q[28], q[29]) * 64 + 1024
qc >>= 7;
qc &= 0x00040004;
half2_uint32 q15((qa | qb | qc) | c0);
dq[ 0] = __hadd2( q0.as_half2, z1);
dq[ 1] = __hfma2( q1.as_half2, y8, z8);
dq[ 2] = __hadd2( q2.as_half2, z1);
dq[ 3] = __hfma2( q3.as_half2, y8, z8);
dq[ 4] = __hfma2( q4.as_half2, y64, z64);
dq[ 5] = __hadd2( q5.as_half2, z1);
dq[ 6] = __hfma2( q6.as_half2, y8, z8);
dq[ 7] = __hadd2( q7.as_half2, z1);
dq[ 8] = __hfma2( q8.as_half2, y8, z8);
dq[ 9] = __hfma2( q9.as_half2, y64, z64);
dq[10] = __hadd2(q10.as_half2, z1);
dq[11] = __hfma2(q11.as_half2, y8, z8);
dq[12] = __hadd2(q12.as_half2, z1);
dq[13] = __hfma2(q13.as_half2, y8, z8);
dq[14] = __hfma2(q14.as_half2, y64, z64);
dq[15] = __hadd2(q15.as_half2, z1);
}
#else
__forceinline__ __device__ void shuffle_3bit_32
(
uint32_t* q,
int stride
)
{
}
__forceinline__ __device__ void dequant_3bit_32
(
const uint32_t q_0,
const uint32_t q_1,
const uint32_t q_2,
half2 (&dq)[16],
int stride
)
{
half dqh[32];
for (int i = 0; i < 10; i++) dqh[ i] = dq_ns(exb( q_0, i * 3 , 0x07), 4);
dqh[10 ] = dq_ns(exb(q_1, q_0, 30, 0x07), 4);
for (int i = 0; i < 10; i++) dqh[11 + i] = dq_ns(exb( q_1, i * 3 + 1, 0x07), 4);
dqh[21 ] = dq_ns(exb(q_2, q_1, 31, 0x07), 4);
for (int i = 0; i < 10; i++) dqh[22 + i] = dq_ns(exb( q_2, i * 3 + 2, 0x07), 4);
for (int i = 0; i < 16; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
#endif
#endif
|
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_3.cuh/0
|
{
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_3.cuh",
"repo_id": "text-generation-inference",
"token_count": 3335
}
| 240
|
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch
@pytest.fixture(scope="session")
def default_causal_lm():
return CausalLM.fallback("gpt2")
@pytest.fixture(scope="session")
def gpt2_tokenizer():
tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
tokenizer.pad_token_id = 50256
return tokenizer
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer):
return CausalLMBatch.from_pb(
default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2)
return CausalLMBatch.from_pb(
batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_causal_lm_batch):
batch = default_causal_lm_batch
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert len(batch.input_ids) == default_pb_batch.size
assert batch.input_ids[0][-1] == 14402
assert torch.all(batch.input_ids[0][:-1] == 50256)
assert batch.attention_mask[0, 0] == 1
assert torch.all(batch.attention_mask[0, 1:] == 0)
assert batch.past_key_values is None
assert all(
[
torch.equal(input_ids, all_input_ids[:, 0])
for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
]
)
assert batch.input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
def test_batch_concatenate_no_prefill(default_causal_lm_batch):
with pytest.raises(ValueError):
CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch])
def test_causal_lm_batch_type(default_causal_lm):
assert default_causal_lm.batch_type == CausalLMBatch
def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch):
sequence_length = len(default_causal_lm_batch.all_input_ids[0])
generations, next_batch, _ = default_causal_lm.generate_token(
default_causal_lm_batch
)
assert len(generations) == len(next_batch)
assert isinstance(next_batch, CausalLMBatch)
assert len(next_batch.all_input_ids) == len(next_batch)
assert len(next_batch.all_input_ids[0]) == sequence_length + 1
assert len(next_batch.attention_mask[0]) == 11
assert next_batch.all_input_ids[0][-1] == 13
assert next_batch.all_input_ids[0][-2] == 14402
assert torch.all(next_batch.all_input_ids[0][:-2] == 50256)
assert torch.all(next_batch.attention_mask[0][0:2] == 1)
assert torch.all(next_batch.attention_mask[0][2:] == 0)
assert next_batch.input_ids.shape == (len(next_batch), 1)
assert next_batch.input_ids[0, 0] == 13
assert next_batch.input_lengths == [2]
assert next_batch.max_input_length == next_batch.input_lengths[0]
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 13
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == "."
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_causal_lm_generate_token_completion(
default_causal_lm, default_causal_lm_batch
):
next_batch = default_causal_lm_batch
for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert generations[0].request_id == default_causal_lm_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
def test_causal_lm_generate_token_completion_multi(
default_causal_lm, default_multi_requests_causal_lm_batch
):
next_batch = default_multi_requests_causal_lm_batch
    for _ in range(
default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == ".java:784)"
assert (
generations[1].request_id
== default_multi_requests_causal_lm_batch.requests[1].id
)
assert (
generations[1].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
)
# Copy stopping_criterias before filtering
stopping_criterias = (
default_multi_requests_causal_lm_batch.stopping_criterias.copy()
)
next_batch = next_batch.filter([next_batch.requests[0].id])
for _ in range(
stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert (
generations[0].request_id
== default_multi_requests_causal_lm_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
def test_batch_concatenate(
default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch
):
next_batch_0 = default_causal_lm_batch
_, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
_, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_causal_lm_batch
_, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1)
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
]
next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1])
assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])
assert torch.all(
next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
)
assert torch.all(
next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
)
assert torch.all(next_batch.attention_mask[1:, 3:] == 0)
assert next_batch.batch_id == 0
assert next_batch.input_ids[0, 0] == 12355
assert torch.all(next_batch.input_ids[1:] == 13)
assert next_batch.input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == next_batch_1.requests
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
for i, past in enumerate(next_batch.past_key_values):
assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :]
)
assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :]
)
for _ in range(
default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == ".java:784)"
assert (
generations[2].request_id
== default_multi_requests_causal_lm_batch.requests[1].id
)
assert (
generations[2].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
)
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
for _ in range(
default_causal_lm_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
- 2
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert generations[0].request_id == default_causal_lm_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
next_batch = next_batch.filter([next_batch.requests[1].id])
for _ in range(
default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
- default_causal_lm_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
- 4
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert (
generations[0].request_id
== default_multi_requests_causal_lm_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
|
text-generation-inference/server/tests/models/test_causal_lm.py/0
|
{
"file_path": "text-generation-inference/server/tests/models/test_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 5387
}
| 241
|
import torch
from typing import Dict, Optional, TypeVar
from text_generation_server.models.types import Batch
B = TypeVar("B", bound=Batch)
class Cache:
def __init__(self):
self.cache: Dict[int, B] = {}
def pop(self, batch_id: int) -> Optional[B]:
return self.cache.pop(batch_id, None)
def set(self, entry: B):
if entry is not None:
self.cache[entry.batch_id] = entry
def delete(self, batch_id: int):
batch = self.pop(batch_id)
if batch is not None:
del batch
if torch.cuda.is_available():
torch.cuda.empty_cache()
def clear(self):
keys = list(self.cache.keys())
for k in keys:
self.delete(k)
def __len__(self):
return len(self.cache.keys())
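# Illustrative usage (a sketch, assuming `batch` is an instance of some
# concrete `Batch` subclass with a `batch_id` attribute):
#
#     cache = Cache()
#     cache.set(batch)                   # stored under batch.batch_id
#     same = cache.pop(batch.batch_id)   # removed and returned (or None)
#     cache.delete(batch.batch_id)       # dropped; CUDA cache emptied if available
#     cache.clear()                      # deletes every cached batch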
|
text-generation-inference/server/text_generation_server/cache.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/cache.py",
"repo_id": "text-generation-inference",
"token_count": 359
}
| 242
|
from dataclasses import dataclass
from typing import List, Union
import torch
from text_generation_server.utils.weights import Weight, Weights, WeightsLoader
@dataclass
class Exl2Weight(Weight):
"""
Exllama2 exl2 quantized weights.
"""
q_weight: torch.Tensor
q_scale: torch.Tensor
q_invperm: torch.Tensor
q_scale_max: torch.Tensor
q_groups: torch.Tensor
def __post_init__(self):
self.q_scale_max /= 256
self.q_invperm = self.q_invperm.short()
@property
def device(self) -> torch.device:
return self.q_weight.device
def get_linear(self, bias: torch.Tensor):
from text_generation_server.layers.gptq import ExllamaQuantLinear
return ExllamaQuantLinear(self, bias)
class Exl2WeightsLoader(WeightsLoader):
"""Loader for exl2-quantized weights."""
def get_weights(self, weights: "Weights", prefix: str):
"""
        Get weights at the given prefix and apply without tensor parallelism.
"""
try:
q_weight = weights.get_tensor(f"{prefix}.q_weight")
except RuntimeError:
raise RuntimeError(
"Cannot load `exl2`-quantized weight, make sure the model is already quantized."
)
q_scale = weights.get_tensor(f"{prefix}.q_scale")
q_invperm = weights.get_tensor(f"{prefix}.q_invperm")
q_scale_max = weights.get_tensor(f"{prefix}.q_scale_max")
q_groups = weights.get_tensor(f"{prefix}.q_groups")
return Exl2Weight(
q_weight=q_weight,
q_scale=q_scale,
q_invperm=q_invperm,
q_scale_max=q_scale_max,
q_groups=q_groups,
)
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
raise RuntimeError("Column-packed weights are not supported for exl")
def get_weights_col(self, weights: Weights, prefix: str):
# Sharding is not yet supported, so we return the weights as-is.
return self.get_weights(weights, prefix)
def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int):
raise ValueError("get_multi_weights_col is not supported for exl2")
def get_weights_row(self, weights: Weights, prefix: str):
# Sharding is not yet supported, so we return the weights as-is.
return self.get_weights(weights, prefix)
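# Illustrative note (sketch): exl2 checkpoints cannot be sharded, so the
# row/column getters above return the full, unsharded weight, while packed
# and multi-prefix loading are rejected outright.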
|
text-generation-inference/server/text_generation_server/layers/exl2.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/exl2.py",
"repo_id": "text-generation-inference",
"token_count": 1050
}
| 243
|
import functools
from typing import List, Tuple
import numpy
import torch
from text_generation_server.utils.import_utils import SYSTEM
try:
import marlin_kernels
except ImportError:
marlin_kernels = None
try:
major, _minor = torch.cuda.get_device_capability()
has_sm_8_0 = major >= 8
except Exception:
has_sm_8_0 = False
def _check_marlin_kernels():
if not (SYSTEM == "cuda" and has_sm_8_0):
raise NotImplementedError(
"Using quantized Marlin models requires a GPU with CUDA capability 8.0 or later."
)
if marlin_kernels is None:
raise NotImplementedError(
"marlin is not installed, install it with: pip install server/marlin"
)
# https://github.com/IST-DASLab/marlin/blob/2f6d7c10e124b3c5fa29ff8d77d568bd7af3274c/marlin/__init__.py#L40C1-L68C54
@functools.cache
def get_perms() -> Tuple[List[int], List[int]]:
scale_perm = []
for i in range(8):
scale_perm.extend([i + 8 * j for j in range(8)])
scale_perm_single = []
for i in range(4):
scale_perm_single.extend([2 * i + j for j in [0, 1, 8, 9, 16, 17, 24, 25]])
return scale_perm, scale_perm_single
def permute_scales(scales: torch.Tensor):
scale_perm, scale_perm_single = get_perms()
out_features = scales.shape[1]
if scales.shape[0] == 1:
scales = scales.reshape((-1, len(scale_perm_single)))[:, scale_perm_single]
else:
scales = scales.reshape((-1, len(scale_perm)))[:, scale_perm]
return scales.reshape((-1, out_features)).contiguous()
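# Illustrative note (sketch): scale_perm has 8 * 8 = 64 entries and
# scale_perm_single has 4 * 8 = 32, so per-group scales (shape[0] > 1) are
# permuted in chunks of 64 and channel-wise scales (shape[0] == 1) in chunks
# of 32, matching the Marlin kernel's tile layout.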
# Functions below are from vLLM
def get_pack_factor(bits: int) -> int:
if 32 % bits != 0:
raise ValueError(f"Cannot {bits} bit values into uint32")
return 32 // bits
def pack_cols(
q_w: torch.Tensor,
num_bits: int,
size_k: int,
size_n: int,
):
assert q_w.shape == (size_k, size_n)
pack_factor = get_pack_factor(num_bits)
assert size_n % pack_factor == 0
orig_device = q_w.device
q_w = q_w.cpu().numpy().astype(numpy.uint32)
q_res = numpy.zeros((size_k, size_n // pack_factor), dtype=numpy.uint32)
for i in range(pack_factor):
q_res |= q_w[:, i::pack_factor] << num_bits * i
q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device)
q_res = q_res.contiguous()
return q_res
def unpack_cols(
packed_q_w: torch.Tensor,
num_bits: int,
size_k: int,
size_n: int,
):
pack_factor = get_pack_factor(num_bits)
assert size_n % pack_factor == 0
assert packed_q_w.shape == (
size_k,
size_n // pack_factor,
), "packed_q_w.shape = {} size_k = {}, size_n = {} pack_Factor = {}".format(
packed_q_w.shape, size_k, size_n, pack_factor
)
orig_device = packed_q_w.device
packed_q_w_cpu = packed_q_w.cpu().numpy().astype(numpy.uint32)
q_res = numpy.zeros((size_k, size_n), dtype=numpy.uint32)
mask = (1 << num_bits) - 1
for i in range(pack_factor):
vals = packed_q_w_cpu & mask
packed_q_w_cpu >>= num_bits
q_res[:, i::pack_factor] = vals
q_res = torch.from_numpy(q_res.astype(numpy.int32)).to(orig_device)
q_res = q_res.contiguous()
return q_res
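# Illustrative round trip (sketch): with num_bits=4 the pack factor is 8, so
# eight 4-bit columns fit into each uint32 and unpack_cols inverts pack_cols:
#
#     q_w = torch.randint(0, 16, (16, 16), dtype=torch.int32)
#     packed = pack_cols(q_w, num_bits=4, size_k=16, size_n=16)  # (16, 2)
#     assert torch.equal(unpack_cols(packed, 4, 16, 16), q_w)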
def marlin_zero_points(
zp: torch.Tensor, size_k: int, size_n: int, num_bits: int
) -> torch.Tensor:
scale_perm, _ = get_perms()
# Permute zero-points in a similar way to scales, but do not use the
# "single" permutation, since zero-points are applied on every MMA
zp = zp.reshape((-1, len(scale_perm)))[:, scale_perm]
# Interleave column dim (for the dequantize code) and pack it to int32
if num_bits == 4:
interleave = numpy.array([0, 2, 4, 6, 1, 3, 5, 7])
elif num_bits == 8:
interleave = numpy.array([0, 2, 1, 3])
else:
raise Exception("num_bits must be 4 or 8, got {}".format(num_bits))
zp = zp.reshape((-1, len(interleave)))[:, interleave].ravel()
zp = zp.reshape((-1, size_n)).contiguous()
zp = pack_cols(zp, num_bits, size_k, size_n)
return zp
|
text-generation-inference/server/text_generation_server/layers/marlin/util.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/marlin/util.py",
"repo_id": "text-generation-inference",
"token_count": 1782
}
| 244
|
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Idefics model."""
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers import PreTrainedModel
from transformers.activations import ACT2FN
from dataclasses import dataclass
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig
from text_generation_server.models.custom_modeling.idefics_vision import (
IdeficsVisionTransformer,
)
from text_generation_server.models.custom_modeling.idefics_perceiver import (
IdeficsPerceiverResampler,
)
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
SpeculativeHead,
FastLinear,
)
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.utils.import_utils import SYSTEM
from loguru import logger
if SYSTEM == "cuda":
import dropout_layer_norm
elif SYSTEM == "rocm":
from vllm._C import ops
else:
dropout_layer_norm = None
@dataclass
class BaseModelOutputWithPastImage(BaseModelOutputWithPast):
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
class CausalLMOutputWithPastImage(CausalLMOutputWithPast):
image_hidden_states: Optional[torch.FloatTensor] = None
# logger = logging.get_logger(__name__)
# _CONFIG_FOR_DOC = "IdeficsConfig"
# IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = [
# "HuggingFaceM4/idefics-9b",
# "HuggingFaceM4/idefics-80b",
# # See all Idefics models at https://huggingface.co/models?filter=idefics
# ]
def expand_inputs_for_generation(
input_ids,
expand_size=1,
is_encoder_decoder=False,
attention_mask=None,
encoder_outputs=None,
**model_kwargs,
):
expanded_return_idx = (
torch.arange(input_ids.shape[0])
.view(-1, 1)
.repeat(1, expand_size)
.view(-1)
.to(input_ids.device)
)
input_ids = input_ids.index_select(0, expanded_return_idx)
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = token_type_ids.index_select(
0, expanded_return_idx
)
if attention_mask is not None:
model_kwargs["attention_mask"] = attention_mask.index_select(
0, expanded_return_idx
)
model_kwargs["image_attention_mask"] = model_kwargs[
"image_attention_mask"
].index_select(0, expanded_return_idx)
model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(
0, expanded_return_idx
)
if is_encoder_decoder:
if encoder_outputs is None:
raise ValueError(
"If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined."
)
encoder_outputs["last_hidden_state"] = (
encoder_outputs.last_hidden_state.index_select(
0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device)
)
)
model_kwargs["encoder_outputs"] = encoder_outputs
return input_ids, model_kwargs
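# Illustrative example (sketch): for input_ids of shape (2, seq_len) and
# expand_size=3, expanded_return_idx is [0, 0, 0, 1, 1, 1], so every row of
# input_ids, attention_mask, image_attention_mask and pixel_values is
# repeated three times with the copies of each sample kept adjacent.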
def update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False):
# must have this key set to at least None
model_kwargs["past_key_values"] = model_kwargs.get("past_key_values", None)
# update past
if "past_key_values" in outputs:
model_kwargs["past"] = outputs.past_key_values
elif "mems" in outputs:
model_kwargs["past"] = outputs.mems
elif "past_buckets_states" in outputs:
model_kwargs["past"] = outputs.past_buckets_states
else:
model_kwargs["past"] = None
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat(
[token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1
)
# update attention masks
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))],
dim=-1,
)
if "image_attention_mask" in model_kwargs:
image_attention_mask = model_kwargs["image_attention_mask"]
last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
model_kwargs["image_attention_mask"] = last_mask
return model_kwargs
def prepare_inputs_for_generation(input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
pixel_values = kwargs.get("pixel_values", None)
image_attention_mask = kwargs.get("image_attention_mask", None)
# if pixel_values is None or image_attention_mask is None:
# raise ValueError("pixel values and image attention mask cannot be None")
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"pixel_values": pixel_values,
"image_attention_mask": image_attention_mask,
}
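# Illustrative example (sketch): a left-padded attention_mask [0, 0, 1, 1]
# gives cumsum(-1) - 1 == [-1, -1, 0, 1]; masked_fill_ then sets the padded
# positions to 1, yielding position_ids [1, 1, 0, 1], and once `past` is set
# only the last position is kept.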
def freeze_model(model, module_exceptions=[]):
mapping = {
"LayerNorm": nn.LayerNorm,
"Linear": nn.Linear,
"Embedding": nn.Embedding,
}
module_exceptions_mapped = [mapping[m] for m in module_exceptions]
for module in model.modules():
if module_exceptions and any(
[isinstance(module, t) for t in module_exceptions_mapped]
):
module.requires_grad_(
True
            )  # Explicitly setting it to True to avoid any mistakes
else:
module.requires_grad_(False)
return model
class IdeficsDecoupledPartialTPEmbedding(nn.Module):
def __init__(
self,
config,
weights,
):
super().__init__()
self.num_embeddings = config.vocab_size
self.weight = TensorParallelEmbedding(
prefix="model.embed_tokens", weights=weights
)
self.additional_weight = nn.Parameter(
weights.get_tensor("model.embed_tokens.additional_embedding.weight")
)
def forward(self, input_ids):
# Clone so that we don't modify the original input_ids later on
input_ids = input_ids.clone()
additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
input_ids_additional_vocab = input_ids[additional_vocab_indices]
additional_embeddings = torch.nn.functional.embedding(
input_ids_additional_vocab - self.num_embeddings, self.additional_weight
)
# for successful lookup replace input_ids with 0, the results of these will be discarded anyway
input_ids[additional_vocab_indices] = 0
full_vector = self.weight(input_ids)
# overwrite the records with high indices
full_vector[additional_vocab_indices] = additional_embeddings
return full_vector
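    # Illustrative example (sketch): with config.vocab_size == 32000, token id
    # 32003 is looked up as row 3 of the always-trainable additional embedding,
    # while ids below 32000 go through the regular tensor-parallel embedding.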
class IdeficsDecoupledTensorParallelLinear(nn.Module):
# Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
"""
    Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
then it will create `out_additional_features * in_features` additional parameters that are always trained. If
`out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
"""
def __init__(
self,
config,
weights,
) -> None:
super().__init__()
self.fc = SpeculativeHead.load(config=config, prefix="lm_head", weights=weights)
self.additional_fc = FastLinear.load(
config=config,
prefix="lm_head.additional_fc",
weights=weights,
bias=False,
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
output, speculative_logits = self.fc(input)
additional_features = self.additional_fc(input)
output = torch.cat((output, additional_features), -1)
return output, speculative_logits
def extra_repr(self) -> str:
"""Overwriting `nn.Linear.extra_repr` to include new parameters."""
return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
self.in_features,
self.out_features,
self.out_additional_features,
self.bias is not None,
self.partially_freeze,
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
):
"""
    Make the causal mask used for unidirectional (auto-regressive) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[
torch.zeros(
tgt_len, past_key_values_length, dtype=dtype, device=device
),
mask,
],
dim=-1,
)
return mask[None, None, :, :].expand(
bsz, 1, tgt_len, tgt_len + past_key_values_length
)
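# Illustrative example (sketch): for tgt_len = 3 and no past, the mask before
# broadcasting is (with m = torch.finfo(dtype).min):
#     [[0, m, m],
#      [0, 0, m],
#      [0, 0, 0]]
# so position t can only attend to positions <= t.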
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(
inverted_mask.to(torch.bool), torch.finfo(dtype).min
)
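# Illustrative example (sketch): a padding mask [[1, 1, 0]] becomes, after
# expansion and inversion, 0.0 at the two attendable positions and
# torch.finfo(dtype).min at the padded third position, broadcast over tgt_len.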
class IdeficsRMSNorm(nn.Module):
def __init__(self, prefix, weights, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
weight = weights.get_tensor(f"{prefix}.weight")
self.weight = nn.Parameter(weight)
self.variance_epsilon = eps
def forward(self, hidden_states, residual=None):
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
out = ipex.llm.functional.add_rms_norm(
residual,
hidden_states,
self.weight,
None,
self.variance_epsilon,
residual is not None,
)
return out
elif hidden_states.shape[-1] > 8192:
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(
variance + self.variance_epsilon
)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
elif SYSTEM == "cuda":
# faster post attention rms norm
unwrap = False
if len(hidden_states.shape) > 2:
unwrap = True
shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, shape[-1])
normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
None,
None,
None,
None,
None,
0.0,
self.variance_epsilon,
1.0,
0,
None,
False,
True, # Activate RMSNorm
)
if res is None:
res = hidden_states
if unwrap:
normed_hidden_states = normed_hidden_states.view(*shape)
return normed_hidden_states
elif SYSTEM == "rocm":
# We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not.
if residual is not None:
hidden_states += residual
residual = hidden_states
unwrap = False
if len(hidden_states.shape) > 2:
unwrap = True
shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, shape[-1])
out = torch.empty_like(hidden_states)
ops.rms_norm(
out,
hidden_states,
self.weight.data,
self.variance_epsilon,
)
if unwrap:
out = out.view(*shape)
return out
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
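    # Reference semantics of the fused paths above (sketch):
    #     variance = hidden_states.pow(2).mean(-1, keepdim=True)
    #     out = self.weight * hidden_states * torch.rsqrt(variance + self.variance_epsilon)
    # with `residual`, when given, added to `hidden_states` first.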
# this was adapted from LlamaMLP
class IdeficsMLP(nn.Module):
def __init__(
self,
config,
prefix,
weights,
):
super().__init__()
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
shape = gate_up_states.shape
gate_up_states = gate_up_states.view(*shape[:-1], 2, shape[-1] // 2)
return self.down_proj(
self.act_fn(gate_up_states[:, :, 0]) * gate_up_states[:, :, 1]
)
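    # Illustrative note (sketch): gate_proj and up_proj are fused into a single
    # column-parallel matmul; its output is split in two and combined
    # SwiGLU-style as down_proj(act_fn(gate) * up).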
# this was adapted from LlamaAttention
class IdeficsAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config,
prefix,
weights,
qk_layer_norms: bool = False,
is_cross_attention: bool = False,
):
super().__init__()
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.dropout = config.dropout
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.is_cross_attention = is_cross_attention
# if not hasattr(nn.functional, "scaled_dot_product_attention"):
# raise ValueError("this model requires pytorch 2.0 or higher")
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads //= weights.process_group.size()
if self.is_cross_attention:
# kv_input_dim = (
# self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
# )
self.q_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.q_proj", weights=weights, bias=False
)
self.k_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.k_proj", weights=weights, bias=False
)
self.v_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.v_proj", weights=weights, bias=False
)
else:
self.qkv = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
self.o_proj = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.o_proj", weights=weights, bias=False
)
self.rotary_emb = PositionRotaryEmbedding.static(
config=config, dim=self.head_dim, base=10000.0, device=weights.device
)
self.qk_layer_norms = qk_layer_norms
if self.qk_layer_norms:
self.q_layer_norm = IdeficsRMSNorm(
prefix=f"{prefix}.q_layer_norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.k_layer_norm = IdeficsRMSNorm(
prefix=f"{prefix}.q_layer_norm",
weights=weights,
eps=config.rms_norm_eps,
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
# if key_value_states are provided this layer is used as a cross-attention layer
is_cross_attention = self.is_cross_attention or key_value_states is not None
bsz, q_len, _ = hidden_states.size()
if is_cross_attention:
query_states = self.q_proj(hidden_states).view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
query_states = query_states.transpose(1, 2)
(
_,
kv_len,
_,
) = (
key_value_states.size()
) # Note that, in this case, `kv_len` == `kv_seq_len`
key_states = (
self.k_proj(key_value_states)
.view(bsz, kv_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
value_states = (
self.v_proj(key_value_states)
.view(bsz, kv_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
else:
qkv = self.qkv(hidden_states)
query_states, key_states, value_states = qkv.split(
self.num_heads * self.head_dim, dim=2
)
query_states = query_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
key_states = key_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # . transpose(1, 2)
value_states = value_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
kv_seq_len = q_len
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
max_s = max(kv_seq_len, q_len)
cos, sin = self.rotary_emb.get_cos_sin(
position_ids.view(-1), max_s, hidden_states.dtype
)
query_shape = query_states.shape
key_shape = key_states.shape
self.rotary_emb(
query_states.view(-1, *query_shape[2:]),
key_states.reshape(-1, *key_shape[2:]),
cos,
sin,
)
query_states = query_states.view(query_shape)
key_states = key_states.view(key_shape)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
# [bsz, nh, t, hd]
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
if self.qk_layer_norms:
query_states = self.q_layer_norm(query_states)
key_states = self.k_layer_norm(key_states)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_output = nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=attention_mask,
dropout_p=self.dropout,
)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
attn_weights = None
if output_attentions:
            logger.warning(
"attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
)
return attn_output, attn_weights, past_key_value
# this was adapted from LlamaDecoderLayer
class IdeficsDecoderLayer(nn.Module):
def __init__(self, layer_id: int, config: IdeficsConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
prefix = f"model.layers.{layer_id}"
self.self_attn = IdeficsAttention(
config=config,
prefix=f"{prefix}.self_attn",
weights=weights,
qk_layer_norms=False,
is_cross_attention=False,
)
self.mlp = IdeficsMLP(
config=config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.input_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.dropout = config.dropout
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
# hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
# hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
class IdeficsGatedCrossAttentionLayer(nn.Module):
def __init__(self, layer_id, config: IdeficsConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
prefix = f"model.gated_cross_attn_layers.{layer_id}"
self.cross_attn = IdeficsAttention(
config=config,
prefix=f"{prefix}.cross_attn",
weights=weights,
qk_layer_norms=True,
is_cross_attention=True,
)
self.mlp = IdeficsMLP(
config=config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.input_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
        self.dropout = config.dropout
self.act_cross_attn = nn.Tanh()
self.act_dense = nn.Tanh()
self.alpha_cross_attn = nn.Parameter(
weights.get_tensor(f"{prefix}.alpha_cross_attn")
)
self.alpha_dense = nn.Parameter(weights.get_tensor(f"{prefix}.alpha_dense"))
if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
raise ValueError("Alpha parameters not initialized correctly!")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
image_hidden_states: Optional[torch.Tensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
no_images: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
no_images (`bool`, *optional*, defaults to `False`): If `True` the vision part is ignored
"""
if image_hidden_states is None:
raise ValueError(
"`image_hidden_states` is required for Idefics cross attention module which are visual features to be"
" conditioned on."
)
if past_key_value is not None:
raise NotImplementedError(
"Past key value states are not implemented for Idefics cross attention module."
)
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.cross_attn(
hidden_states=hidden_states,
key_value_states=image_hidden_states,
attention_mask=image_attention_mask,
output_attentions=output_attentions,
)
        # hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# when there are no images the model is used in pure language mode
gate = 0 if no_images else 1
hidden_states = (
residual + gate * self.act_cross_attn(self.alpha_cross_attn) * hidden_states
)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
        # hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`IdeficsConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# @add_start_docstrings(
# "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
# LLAMA_START_DOCSTRING,
# )
class IdeficsPreTrainedModel(PreTrainedModel):
config_class = IdeficsConfig
# base_model_prefix = "model"
# supports_gradient_checkpointing = True
# _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
# def _init_weights(self, module):
# # important: this ported version of Idefics isn't meant for training from scratch - only
# # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
# # base should be used for training from scratch and it contains the correct code.
# std = self.config.initializer_range
# if isinstance(module, nn.Linear):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.Embedding):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.padding_idx is not None:
# module.weight.data[module.padding_idx].zero_()
# def _set_gradient_checkpointing(self, module, value=False):
# if isinstance(module, IdeficsModel):
# module.gradient_checkpointing = value
# LLAMA_INPUTS_DOCSTRING = r"""
# Args:
# input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
# Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
# it.
# Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
# [`PreTrainedTokenizer.__call__`] for details.
# [What are input IDs?](../glossary#input-ids)
# attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
# Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
# - 1 for tokens that are **not masked**,
# - 0 for tokens that are **masked**.
# [What are attention masks?](../glossary#attention-mask)
# Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
# [`PreTrainedTokenizer.__call__`] for details.
# If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
# `past_key_values`).
# If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
# and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
# information on the default strategy.
# - 1 indicates the head is **not masked**,
# - 0 indicates the head is **masked**.
# position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
# Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
# config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
# past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
# Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
# `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
# `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
# Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
# blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
# If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
# don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
# `decoder_input_ids` of shape `(batch_size, sequence_length)`.
# inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
# Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
# is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
# model's internal embedding lookup matrix.
# use_cache (`bool`, *optional*):
# If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
# `past_key_values`).
# output_attentions (`bool`, *optional*):
# Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
# tensors for more detail.
# output_hidden_states (`bool`, *optional*):
# Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
# more detail.
# return_dict (`bool`, *optional*):
# Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
# """
# @add_start_docstrings(
# "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
# LLAMA_START_DOCSTRING,
# )
class IdeficsModel(IdeficsPreTrainedModel):
# """
# Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`]
# Args:
# config: IdeficsConfig
# """
def __init__(self, config: IdeficsConfig, weights):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = IdeficsDecoupledPartialTPEmbedding(
config=config,
weights=weights,
)
self.image_size = config.vision_config.image_size
self.vision_config = config.vision_config
self.vision_model = IdeficsVisionTransformer(
prefix="model.vision_model",
config=config.vision_config,
weights=weights,
)
# Perceiver Resampler
if config.use_resampler:
perceiver_config = config.perceiver_config
self.perceiver_resampler = IdeficsPerceiverResampler(
prefix="model.perceiver_resampler",
config=config,
embed_dim=config.vision_config.embed_dim,
depth=perceiver_config.resampler_depth,
n_heads=perceiver_config.resampler_n_heads,
head_dim=perceiver_config.resampler_head_dim,
n_latents=perceiver_config.resampler_n_latents,
weights=weights,
)
self.layers = nn.ModuleList(
[
IdeficsDecoderLayer(layer_id, config, weights)
for layer_id in range(config.num_hidden_layers)
]
)
self.cross_layer_interval = config.cross_layer_interval
num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
self.gated_cross_attn_layers = nn.ModuleList(
[
IdeficsGatedCrossAttentionLayer(layer_id, config, weights)
for layer_id in range(num_cross_layers)
]
)
# self.gradient_checkpointing = False
self.norm = IdeficsRMSNorm(
prefix="model.norm", weights=weights, eps=config.rms_norm_eps
)
# self.gradient_checkpointing = False
# Initialize weights and apply final processing
# self.post_init()
# self.freeze_relevant_params(config)
# def freeze_relevant_params(self, config=None):
# if config is None:
# config = self.config
# if config.freeze_text_layers:
# self.freeze_text_layers(config.freeze_text_module_exceptions)
# if config.freeze_vision_layers:
# freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
# def freeze_text_layers(self, module_exceptions=[]):
# for module in [self.layers, self.norm]:
# freeze_model(module, module_exceptions=module_exceptions)
# def freeze_vision_layers(self, module_exceptions=[]):
# freeze_model(self.vision_model, module_exceptions=module_exceptions)
# def get_input_embeddings(self):
# return self.embed_tokens
# def set_input_embeddings(self, value):
# self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
).to(inputs_embeds.device)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
# @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastImage]:
device = input_ids.device if input_ids is not None else inputs_embeds.device
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
)
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError(
"You have to specify either decoder_input_ids or decoder_inputs_embeds"
)
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
elif position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_key_values_length,
seq_length + past_key_values_length,
dtype=torch.long,
device=device,
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
no_images = False
if image_hidden_states is None:
if pixel_values is None and image_embeddings is None:
raise ValueError(
"Either pixel_values and image_embeddings have to be not-None."
)
elif pixel_values is not None and image_embeddings is not None:
raise ValueError(
"You cannot specify both pixel_values and image_embeddings at the same time"
)
elif pixel_values is not None:
no_images = len(torch.nonzero(pixel_values)) == 0
pixel_values = pixel_values.to(
dtype=self.dtype, device=device
) # fp16 compatibility
batch_size, num_images = pixel_values.shape[:2]
pixel_values = pixel_values.contiguous().view(
batch_size * num_images, *pixel_values.shape[2:]
)
# Get sequence from the vision encoder
image_hidden_states = self.vision_model(
pixel_values=pixel_values
).last_hidden_state
elif image_embeddings is not None:
(
batch_size,
num_images,
image_seq_len,
image_hidden_size,
) = image_embeddings.size()
image_hidden_states = image_embeddings.to(
dtype=self.dtype, device=input_ids.device
)
image_hidden_states = image_hidden_states.view(
batch_size * num_images, image_seq_len, image_hidden_size
)
if self.config.use_resampler:
image_hidden_states = self.perceiver_resampler(image_hidden_states)
image_seq_len, image_hidden_size = image_hidden_states.size(
1
), image_hidden_states.size(2)
image_hidden_states = image_hidden_states.view(
batch_size, num_images * image_seq_len, image_hidden_size
)
else:
no_images = False
num_images = pixel_values.shape[1]
image_seq_len = image_hidden_states.shape[1] // num_images
# # Hack to use the model in full language modeling mode
# image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
# Make image_attention_mask compatible with hidden states
text_seq_len = image_attention_mask.size(1)
image_attention_mask = image_attention_mask.unsqueeze(-1)
image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
image_attention_mask = image_attention_mask.view(
batch_size, text_seq_len, num_images * image_seq_len
)
image_batch_size, image_sequence_length, _ = image_hidden_states.size()
image_hidden_shape = (image_batch_size, image_sequence_length)
if image_attention_mask is None:
image_attention_mask = torch.ones(image_hidden_shape, device=device)
image_attention_mask = self.invert_attention_mask(image_attention_mask)
# if list(image_attention_mask.shape) != [4, 1, 1024, 64]:
# raise ValueError(f"Image hidden_states {image_hidden_states.shape} - mask {image_attention_mask.shape} {num_images} {image_seq_len} {text_seq_len}")
# if image_hidden_states is not None:
# else:
# image_attention_mask = None
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past),
dtype=torch.bool,
device=inputs_embeds.device,
)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask,
(batch_size, seq_length),
inputs_embeds,
past_key_values_length,
)
hidden_states = inputs_embeds
# if self.gradient_checkpointing and self.training:
# if use_cache:
# logger.warning_once(
# "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
# )
# use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = (
past_key_values[idx] if past_key_values is not None else None
)
def vblock(
main_block,
hidden_states,
attention_mask,
position_ids,
past_key_value,
image_hidden_states,
image_attention_mask,
output_attentions,
use_cache,
no_images,
layer_idx,
cross_layer_interval,
gated_cross_attn_layers,
):
# TODO(ls): Add cross attention values to respective lists
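            # Gated cross-attention blocks are interleaved every `cross_layer_interval` decoder
            # layers; each one lets the text hidden states attend to the image hidden states
            # before the regular decoder layer runs.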
if layer_idx % cross_layer_interval == 0:
xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
outputs = xblock(
hidden_states,
attention_mask=attention_mask,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
output_attentions=output_attentions,
use_cache=use_cache,
past_key_value=None, # not implemented
no_images=no_images,
)
hidden_states = outputs[0]
layer_outputs = main_block(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
return layer_outputs
# if self.gradient_checkpointing and self.training:
# past_key_value = None
# if use_cache:
# logger.warning_once(
# "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
# )
# use_cache = False
# layer_outputs = torch.utils.checkpoint.checkpoint(
# vblock,
# decoder_layer,
# hidden_states,
# attention_mask,
# position_ids,
# past_key_value,
# image_hidden_states,
# image_attention_mask,
# output_attentions,
# use_cache,
# no_images,
# idx,
# self.cross_layer_interval,
# self.gated_cross_attn_layers,
# )
# else:
layer_outputs = vblock(
decoder_layer,
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
output_attentions=output_attentions,
use_cache=use_cache,
no_images=no_images,
layer_idx=idx,
cross_layer_interval=self.cross_layer_interval,
gated_cross_attn_layers=self.gated_cross_attn_layers,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
if v is not None
)
return BaseModelOutputWithPastImage(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
image_hidden_states=image_hidden_states,
)
class IdeficsForVisionText2Text(IdeficsPreTrainedModel):
def __init__(
self,
config,
weights,
):
super().__init__(config)
self.model = IdeficsModel(
config=config,
weights=weights,
)
self.lm_head = IdeficsDecoupledTensorParallelLinear(
config=config,
weights=weights,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPastImage]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
pixel_values=pixel_values,
image_embeddings=image_embeddings,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits, speculative_logits = self.lm_head(hidden_states)
loss = None
return (
CausalLMOutputWithPastImage(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
),
speculative_logits,
)
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
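        # Drop kwargs that IdeficsModel.forward does not accept (generation utilities may add them).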
unwanted_kwargs = ["token_type_ids"]
for kwarg in unwanted_kwargs:
inputs.pop(kwarg, None)
return inputs
@staticmethod
def _expand_inputs_for_generation(
*args,
**model_kwargs,
):
return expand_inputs_for_generation(*args, **model_kwargs)
@staticmethod
def _update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=False
):
return update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder
)
@staticmethod
def _reorder_cache(past, beam_idx):
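        # Reorder each layer's cached key/value states to follow the beam indices chosen during beam search.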
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
|
text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_modeling.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 28596
}
| 245
|
import torch
import torch.distributed
from typing import Optional
from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig
from text_generation_server.models.custom_modeling.idefics_processing import (
IdeficsProcessor,
)
from transformers import LlamaTokenizerFast
from text_generation_server.models.custom_modeling.idefics_modeling import (
IdeficsForVisionText2Text,
)
from text_generation_server.models.idefics_causal_lm import IdeficsCausalLM
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.import_utils import SYSTEM
class IDEFICSSharded(IdeficsCausalLM):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
self.quantize = quantize
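        # Set up the process group used to shard the model weights across ranks (tensor parallelism).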
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
# 9b seems to work correctly enough in float16, but 80b seems
# to be really saturating for f16.
dtype = torch.float16 if dtype is None else dtype
elif SYSTEM == "ipex":
if hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device(f"xpu:{rank}")
dtype = torch.float16 if dtype is None else dtype
else:
device = torch.device("cpu")
# Float16 doesn't exist on target.
dtype = torch.bfloat16 if dtype is None else dtype
else:
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
self.device, self.dtype = device, dtype
config = IdeficsConfig.from_pretrained(
model_id,
revision=revision,
trust_remote_code=trust_remote_code,
)
config.quantize = quantize
config.speculator = speculator
config.vision_config.quantize = quantize
tokenizer = LlamaTokenizerFast.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
self.processor = IdeficsProcessor.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
weights_loader = get_loader(
quantize=quantize, model_id=model_id, revision=revision
)
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device=device,
dtype=dtype,
process_group=self.process_group,
weights_loader=weights_loader,
)
model = IdeficsForVisionText2Text(config, weights)
torch.distributed.barrier(group=self.process_group)
super(IdeficsCausalLM, self).__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
|
text-generation-inference/server/text_generation_server/models/idefics.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/idefics.py",
"repo_id": "text-generation-inference",
"token_count": 1649
}
| 246
|
import time
import os
from datetime import timedelta
from loguru import logger
from pathlib import Path
from typing import Optional, List
from huggingface_hub import file_download, hf_api, HfApi, hf_hub_download
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub.utils import (
LocalEntryNotFoundError,
EntryNotFoundError,
RevisionNotFoundError, # noqa # Import here to ease try/except in other part of the lib
)
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None)
HF_HUB_OFFLINE = os.environ.get("HF_HUB_OFFLINE", "0").lower() in ["true", "1", "yes"]
def _cached_adapter_weight_files(
adapter_id: str, revision: Optional[str], extension: str
) -> List[str]:
"""Guess weight files from the cached revision snapshot directory"""
d = _get_cached_revision_directory(adapter_id, revision)
if not d:
return []
filenames = _adapter_weight_files_from_dir(d, extension)
return filenames
def _cached_weight_files(
model_id: str, revision: Optional[str], extension: str
) -> List[str]:
"""Guess weight files from the cached revision snapshot directory"""
d = _get_cached_revision_directory(model_id, revision)
if not d:
return []
filenames = _weight_files_from_dir(d, extension)
return filenames
def _weight_hub_files_from_model_info(
info: hf_api.ModelInfo, extension: str
) -> List[str]:
return [
s.rfilename
for s in info.siblings
if s.rfilename.endswith(extension)
and len(s.rfilename.split("/")) == 1
and "arguments" not in s.rfilename
and "args" not in s.rfilename
and "training" not in s.rfilename
]
def _weight_files_from_dir(d: Path, extension: str) -> List[str]:
# os.walk: do not iterate, just scan for depth 1, not recursively
# see _weight_hub_files_from_model_info, that's also what is
# done there with the len(s.rfilename.split("/")) == 1 condition
root, _, files = next(os.walk(str(d)))
filenames = [
os.path.join(root, f)
for f in files
if f.endswith(extension)
and "arguments" not in f
and "args" not in f
and "adapter" not in f
and "training" not in f
]
return filenames
def _adapter_weight_files_from_dir(d: Path, extension: str) -> List[str]:
# os.walk: do not iterate, just scan for depth 1, not recursively
# see _weight_files_from_dir, that's also what is done there
root, _, files = next(os.walk(str(d)))
filenames = [
os.path.join(root, f)
for f in files
if f.endswith(extension)
and "arguments" not in f
and "args" not in f
and "training" not in f
]
return filenames
def _adapter_config_files_from_dir(d: Path) -> List[str]:
# os.walk: do not iterate, just scan for depth 1, not recursively
# see _weight_files_from_dir, that's also what is done there
root, _, files = next(os.walk(str(d)))
filenames = [
os.path.join(root, f)
for f in files
if f.endswith(".json") and "arguments" not in f and "args" not in f
]
return filenames
def _get_cached_revision_directory(
model_id: str, revision: Optional[str]
) -> Optional[Path]:
if revision is None:
revision = "main"
repo_cache = Path(HUGGINGFACE_HUB_CACHE) / Path(
file_download.repo_folder_name(repo_id=model_id, repo_type="model")
)
if not repo_cache.is_dir():
# No cache for this model
return None
refs_dir = repo_cache / "refs"
snapshots_dir = repo_cache / "snapshots"
# Resolve refs (for instance to convert main to the associated commit sha)
if refs_dir.is_dir():
revision_file = refs_dir / revision
if revision_file.exists():
with revision_file.open() as f:
revision = f.read()
# Check if revision folder exists
if not snapshots_dir.exists():
return None
cached_shas = os.listdir(snapshots_dir)
if revision not in cached_shas:
# No cache for this revision and we won't try to return a random revision
return None
return snapshots_dir / revision
def weight_hub_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[str]:
"""Get the weights filenames on the hub"""
api = HfApi()
if HF_HUB_OFFLINE:
filenames = _cached_weight_files(model_id, revision, extension)
else:
# Online case, fetch model info from the Hub
info = api.model_info(model_id, revision=revision)
filenames = _weight_hub_files_from_model_info(info, extension)
if not filenames:
raise EntryNotFoundError(
f"No {extension} weights found for model {model_id} and revision {revision}.",
None,
)
return filenames
def try_to_load_from_cache(
model_id: str, revision: Optional[str], filename: str
) -> Optional[Path]:
"""Try to load a file from the Hugging Face cache"""
d = _get_cached_revision_directory(model_id, revision)
if not d:
return None
# Check if file exists in cache
cached_file = d / filename
return cached_file if cached_file.is_file() else None
def weight_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[Path]:
"""Get the local files"""
# Local model
d = Path(model_id)
if d.exists() and d.is_dir():
local_files = _weight_files_from_dir(d, extension)
if not local_files:
raise FileNotFoundError(
f"No local weights found in {model_id} with extension {extension}"
)
return [Path(f) for f in local_files]
try:
filenames = weight_hub_files(model_id, revision, extension)
except EntryNotFoundError as e:
if extension != ".safetensors":
raise e
# Try to see if there are pytorch weights
pt_filenames = weight_hub_files(model_id, revision, extension=".bin")
# Change pytorch extension to safetensors extension
# It is possible that we have safetensors weights locally even though they are not on the
# hub if we converted weights locally without pushing them
        filenames = [
            f"{Path(f).stem.removeprefix('pytorch_')}.safetensors" for f in pt_filenames
        ]
if WEIGHTS_CACHE_OVERRIDE is not None:
files = []
for filename in filenames:
p = Path(WEIGHTS_CACHE_OVERRIDE) / filename
if not p.exists():
raise FileNotFoundError(
f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}."
)
files.append(p)
return files
files = []
for filename in filenames:
cache_file = try_to_load_from_cache(
model_id, revision=revision, filename=filename
)
if cache_file is None:
raise LocalEntryNotFoundError(
f"File {filename} of model {model_id} not found in "
f"{os.getenv('HUGGINGFACE_HUB_CACHE', 'the local cache')}. "
f"Please run `text-generation-server download-weights {model_id}` first."
)
files.append(cache_file)
return files
def download_weights(
filenames: List[str], model_id: str, revision: Optional[str] = None
) -> List[Path]:
"""Download the safetensors files from the hub"""
def download_file(fname, tries=5, backoff: int = 5):
local_file = try_to_load_from_cache(model_id, revision, fname)
if local_file is not None:
logger.info(f"File {fname} already present in cache.")
return Path(local_file)
for idx in range(tries):
try:
logger.info(f"Download file: {fname}")
stime = time.time()
local_file = hf_hub_download(
filename=fname,
repo_id=model_id,
revision=revision,
local_files_only=HF_HUB_OFFLINE,
)
logger.info(
f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - stime))}."
)
return Path(local_file)
except Exception as e:
if idx + 1 == tries:
raise e
logger.error(e)
logger.info(f"Retrying in {backoff} seconds")
time.sleep(backoff)
logger.info(f"Retry {idx + 1}/{tries - 1}")
# We do this instead of using tqdm because we want to parse the logs with the launcher
start_time = time.time()
files = []
for i, filename in enumerate(filenames):
file = download_file(filename)
elapsed = timedelta(seconds=int(time.time() - start_time))
remaining = len(filenames) - (i + 1)
eta = (elapsed / (i + 1)) * remaining if remaining > 0 else 0
logger.info(f"Download: [{i + 1}/{len(filenames)}] -- ETA: {eta}")
files.append(file)
return files
|
text-generation-inference/server/text_generation_server/utils/hub.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/utils/hub.py",
"repo_id": "text-generation-inference",
"token_count": 3950
}
| 247
|
<p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
</p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue&cachedrop">
</a>
<a href="https://pepy.tech/project/tokenizers">
<img src="https://pepy.tech/badge/tokenizers/week" />
</a>
</p>
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
## Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignment tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
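That pre-processing from the last point is configured directly on the tokenizer. A minimal sketch, assuming a trained `tokenizer` whose vocabulary contains a `[PAD]` token (the id 3 below is a placeholder for whatever id it got in your vocabulary):
```python
# Hypothetical ids: pad_id=3 assumes "[PAD]" was assigned id 3 in your vocabulary
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
tokenizer.enable_truncation(max_length=512)

encodings = tokenizer.encode_batch(["Short sentence.", "A somewhat longer sentence."])
print([len(e.ids) for e in encodings])  # both padded to the longest sequence in the batch
```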
## Performance
Performance can vary depending on hardware, but running [bindings/python/benches/test_tiktoken.py](bindings/python/benches/test_tiktoken.py) should give the following on a g6 AWS instance:

## Bindings
We provide bindings to the following languages (more to come!):
- [Rust](https://github.com/huggingface/tokenizers/tree/main/tokenizers) (Original implementation)
- [Python](https://github.com/huggingface/tokenizers/tree/main/bindings/python)
- [Node.js](https://github.com/huggingface/tokenizers/tree/main/bindings/node)
- [Ruby](https://github.com/ankane/tokenizers-ruby) (Contributed by @ankane, external repo)
## Quick example using Python:
Choose your model between Byte-Pair Encoding, WordPiece or Unigram and instantiate a tokenizer:
```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE())
```
You can customize how pre-tokenization (e.g., splitting into words) is done:
```python
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
```
Then training your tokenizer on a set of files takes just two lines of code:
```python
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.train(files=["wiki.train.raw", "wiki.valid.raw", "wiki.test.raw"], trainer=trainer)
```
Once your tokenizer is trained, encode any text with just one line:
```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
```
Check the [documentation](https://huggingface.co/docs/tokenizers/index)
or the [quicktour](https://huggingface.co/docs/tokenizers/quicktour) to learn more!
|
tokenizers/README.md/0
|
{
"file_path": "tokenizers/README.md",
"repo_id": "tokenizers",
"token_count": 1056
}
| 248
|
/* eslint-disable */
var globRequire = require;
describe("pipelineExample", () => {
// This is a hack to let us require using path similar to what the user has to use
function require(mod: string) {
if (mod.startsWith("tokenizers")) {
// let path = mod.slice("tokenizers".length);
return globRequire("../../");
} else {
return globRequire(mod);
}
}
let console = {
log: (..._args: any[]) => {}
};
it("shows pipeline parts", async () => {
// START reload_tokenizer
let { Tokenizer } = require("tokenizers");
let tokenizer = Tokenizer.fromFile("data/tokenizer-wiki.json");
// END reload_tokenizer
// START setup_normalizer
let { sequenceNormalizer, nfdNormalizer, stripAccentsNormalizer } = require("tokenizers");
let normalizer = sequenceNormalizer([nfdNormalizer(), stripAccentsNormalizer()]);
// END setup_normalizer
// START test_normalizer
        let normalized = normalizer.normalizeString("Héllò hôw are ü?")
// "Hello how are u?"
// END test_normalizer
expect(normalized).toEqual("Hello how are u?");
// START replace_normalizer
tokenizer.setNormalizer(normalizer)
// END replace_normalizer
// START setup_pre_tokenizer
let { whitespacePreTokenizer } = require("tokenizers");
var preTokenizer = whitespacePreTokenizer();
var preTokenized = preTokenizer.preTokenizeString("Hello! How are you? I'm fine, thank you.");
// END setup_pre_tokenizer
expect(preTokenized).toEqual([
["Hello", [0, 5]],
["!", [5, 6]],
["How", [7, 10]],
["are", [11, 14]],
["you", [15, 18]],
["?", [18, 19]],
["I", [20, 21]],
["'", [21, 22]],
['m', [22, 23]],
["fine", [24, 28]],
[",", [28, 29]],
["thank", [30, 35]],
["you", [36, 39]],
[".", [39, 40]]
]);
// START combine_pre_tokenizer
let { sequencePreTokenizer, digitsPreTokenizer } = require("tokenizers");
var preTokenizer = sequencePreTokenizer([whitespacePreTokenizer(), digitsPreTokenizer(true)]);
var preTokenized = preTokenizer.preTokenizeString("Call 911!");
// END combine_pre_tokenizer
// START replace_pre_tokenizer
tokenizer.setPreTokenizer(preTokenizer)
// END replace_pre_tokenizer
// START setup_processor
let { templateProcessing } = require("tokenizers");
tokenizer.setPostProcessor(templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[["[CLS]", 1], ["[SEP]", 2]]
));
// END setup_processor
// START test_decoding
        let output = await tokenizer.encode("Hello, y'all! How are you 😁 ?");
console.log(output.getIds());
// [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
let decoded = await tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2], true);
// "Hello , y ' all ! How are you ?"
// END test_decoding
expect(decoded).toEqual("Hello , y ' all ! How are you ?");
});
it.skip("trains the tokenizer", async () => {
// START bert_setup_tokenizer
let { Tokenizer } = require("tokenizers");
let { WordPiece } = require("tokenizers");
let bertTokenizer = new Tokenizer(WordPiece.init({}, { unkToken: "[UNK]" }));
// END bert_setup_tokenizer
// START bert_setup_normalizer
let { sequenceNormalizer, lowercaseNormalizer, nfdNormalizer, stripAccentsNormalizer }
= require("tokenizers");
bertTokenizer.setNormalizer(sequenceNormalizer([
nfdNormalizer(), lowercaseNormalizer(), stripAccentsNormalizer()
]))
// END bert_setup_normalizer
// START bert_setup_pre_tokenizer
let { whitespacePreTokenizer } = require("tokenizers");
bertTokenizer.setPreTokenizer(whitespacePreTokenizer());
// END bert_setup_pre_tokenizer
// START bert_setup_processor
let { templateProcessing } = require("tokenizers");
bertTokenizer.setPostProcessor(templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[["[CLS]", 1], ["[SEP]", 2]]
));
// END bert_setup_processor
// START bert_train_tokenizer
let { wordPieceTrainer } = require("tokenizers");
let trainer = wordPieceTrainer({
vocabSize: 30522,
specialTokens: ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
});
let files = ["test", "train", "valid"].map(split => `data/wikitext-103-raw/wiki.${split}.raw`);
bertTokenizer.train(files, trainer);
bertTokenizer.save("data/bert-wiki.json")
// END bert_train_tokenizer
});
it("shows a full bert example", async () => {
let { Tokenizer } = require("tokenizers");
let bertTokenizer = await Tokenizer.fromFile("data/bert-wiki.json")
// START bert_test_decoding
        let output = await bertTokenizer.encode("Welcome to the 🤗 Tokenizers library.");
console.log(output.getTokens());
// ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]
var decoded = await bertTokenizer.decode(output.getIds(), true);
// "welcome to the tok ##eni ##zer ##s library ."
// END bert_test_decoding
expect(decoded).toEqual("welcome to the tok ##eni ##zer ##s library .");
// START bert_proper_decoding
let { wordPieceDecoder } = require("tokenizers");
bertTokenizer.setDecoder(wordPieceDecoder());
var decoded = await bertTokenizer.decode(output.getIds(), true);
// "welcome to the tokenizers library."
// END bert_proper_decoding
expect(decoded).toEqual("welcome to the tokenizers library.");
});
});
|
tokenizers/bindings/node/examples/documentation/pipeline.test.ts/0
|
{
"file_path": "tokenizers/bindings/node/examples/documentation/pipeline.test.ts",
"repo_id": "tokenizers",
"token_count": 2710
}
| 249
|
# `tokenizers-android-arm-eabi`
This is the **armv7-linux-androideabi** binary for `tokenizers`
|
tokenizers/bindings/node/npm/android-arm-eabi/README.md/0
|
{
"file_path": "tokenizers/bindings/node/npm/android-arm-eabi/README.md",
"repo_id": "tokenizers",
"token_count": 35
}
| 250
|
# `tokenizers-linux-x64-gnu`
This is the **x86_64-unknown-linux-gnu** binary for `tokenizers`
|
tokenizers/bindings/node/npm/linux-x64-gnu/README.md/0
|
{
"file_path": "tokenizers/bindings/node/npm/linux-x64-gnu/README.md",
"repo_id": "tokenizers",
"token_count": 36
}
| 251
|
use crate::arc_rwlock_serde;
use crate::tasks::models::{BPEFromFilesTask, WordLevelFromFilesTask, WordPieceFromFilesTask};
use crate::trainers::Trainer;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use tokenizers as tk;
use tokenizers::models::bpe::{BpeBuilder, Merges, Vocab};
use tokenizers::models::wordlevel::WordLevelBuilder;
use tokenizers::models::wordpiece::WordPieceBuilder;
#[napi]
#[derive(Clone, Serialize, Deserialize)]
pub struct Model {
#[serde(flatten, with = "arc_rwlock_serde")]
pub(crate) model: Option<Arc<RwLock<tk::models::ModelWrapper>>>,
}
impl<M> From<M> for Model
where
M: Into<tk::models::ModelWrapper>,
{
fn from(wrapper: M) -> Self {
Self {
model: Some(Arc::new(RwLock::new(wrapper.into()))),
}
}
}
#[napi(js_name = "BPE")]
pub struct Bpe {}
#[napi]
impl Bpe {
#[napi(factory, ts_return_type = "Model")]
pub fn empty() -> Result<Model> {
let bpe = tk::models::bpe::BPE::default();
Ok(Model {
model: Some(Arc::new(RwLock::new(bpe.into()))),
})
}
#[napi(factory, ts_return_type = "Model")]
pub fn init(vocab: Vocab, merges: Merges, options: Option<BpeOptions>) -> Result<Model> {
let options = options.unwrap_or_default();
let mut builder = tk::models::bpe::BPE::builder().vocab_and_merges(vocab, merges);
builder = options.apply_to_bpe_builder(builder);
let model = builder
.build()
.map_err(|e| Error::from_reason(e.to_string()))?;
Ok(Model {
model: Some(Arc::new(RwLock::new(model.into()))),
})
}
#[napi(ts_return_type = "Promise<Model>")]
pub fn from_file(
vocab: String,
merges: String,
options: Option<BpeOptions>,
) -> AsyncTask<BPEFromFilesTask> {
let options = options.unwrap_or_default();
let mut builder = tk::models::bpe::BPE::from_file(&vocab, &merges);
builder = options.apply_to_bpe_builder(builder);
AsyncTask::new(BPEFromFilesTask {
builder: Some(builder),
})
}
}
impl tk::Model for Model {
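  // Every `Model` trait method delegates to the wrapped `tk::models::ModelWrapper`,
  // erroring (or panicking, for the infallible getters) if the model is uninitialized.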
type Trainer = Trainer;
fn tokenize(&self, sequence: &str) -> tk::Result<Vec<tk::Token>> {
self
.model
.as_ref()
.ok_or("Uninitialized Model")?
.read()
.unwrap()
.tokenize(sequence)
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.model.as_ref()?.read().unwrap().token_to_id(token)
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.model.as_ref()?.read().unwrap().id_to_token(id)
}
fn get_vocab(&self) -> HashMap<String, u32> {
self
.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_vocab()
}
fn get_vocab_size(&self) -> usize {
self
.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_vocab_size()
}
fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> {
self
.model
.as_ref()
.ok_or("Uninitialized Model")?
.read()
.unwrap()
.save(folder, name)
}
fn get_trainer(&self) -> Self::Trainer {
self
.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_trainer()
.into()
}
}
#[derive(Default)]
#[napi(object)]
pub struct BpeOptions {
pub cache_capacity: Option<u32>,
pub dropout: Option<f64>,
pub unk_token: Option<String>,
pub continuing_subword_prefix: Option<String>,
pub end_of_word_suffix: Option<String>,
pub fuse_unk: Option<bool>,
pub byte_fallback: Option<bool>,
}
impl BpeOptions {
fn apply_to_bpe_builder(self, mut builder: BpeBuilder) -> BpeBuilder {
if let Some(cache_capacity) = self.cache_capacity {
builder = builder.cache_capacity(cache_capacity as usize);
}
if let Some(dropout) = self.dropout {
builder = builder.dropout(dropout as f32);
}
if let Some(unk_token) = self.unk_token {
builder = builder.unk_token(unk_token);
}
if let Some(continuing_subword_prefix) = self.continuing_subword_prefix {
builder = builder.continuing_subword_prefix(continuing_subword_prefix);
}
if let Some(end_of_word_suffix) = self.end_of_word_suffix {
builder = builder.end_of_word_suffix(end_of_word_suffix);
}
if let Some(fuse_unk) = self.fuse_unk {
builder = builder.fuse_unk(fuse_unk);
}
if let Some(byte_fallback) = self.byte_fallback {
builder = builder.byte_fallback(byte_fallback);
}
builder
}
}
#[derive(Default)]
#[napi(object)]
pub struct WordPieceOptions {
pub unk_token: Option<String>,
pub continuing_subword_prefix: Option<String>,
pub max_input_chars_per_word: Option<u32>,
}
impl WordPieceOptions {
fn apply_to_wordpiece_builder(self, mut builder: WordPieceBuilder) -> WordPieceBuilder {
if let Some(token) = self.unk_token {
builder = builder.unk_token(token);
}
if let Some(prefix) = self.continuing_subword_prefix {
builder = builder.continuing_subword_prefix(prefix);
}
if let Some(max) = self.max_input_chars_per_word {
builder = builder.max_input_chars_per_word(max as usize);
}
builder
}
}
#[napi]
pub struct WordPiece {}
#[napi]
impl WordPiece {
#[napi(factory, ts_return_type = "Model")]
pub fn init(vocab: Vocab, options: Option<WordPieceOptions>) -> Result<Model> {
let options = options.unwrap_or_default();
let mut builder = tk::models::wordpiece::WordPiece::builder().vocab(vocab);
builder = options.apply_to_wordpiece_builder(builder);
let model = builder
.build()
.map_err(|e| Error::from_reason(e.to_string()))?;
Ok(Model {
model: Some(Arc::new(RwLock::new(model.into()))),
})
}
#[napi(factory)]
pub fn empty() -> Model {
let wordpiece = tk::models::wordpiece::WordPiece::default();
Model {
model: Some(Arc::new(RwLock::new(wordpiece.into()))),
}
}
#[napi(ts_return_type = "Promise<Model>")]
pub fn from_file(
vocab: String,
options: Option<WordPieceOptions>,
) -> AsyncTask<WordPieceFromFilesTask> {
let options = options.unwrap_or_default();
let mut builder = tk::models::wordpiece::WordPiece::from_file(&vocab);
builder = options.apply_to_wordpiece_builder(builder);
AsyncTask::new(WordPieceFromFilesTask {
builder: Some(builder),
})
}
}
#[derive(Default)]
#[napi(object)]
pub struct WordLevelOptions {
pub unk_token: Option<String>,
}
impl WordLevelOptions {
fn apply_to_wordlevel_builder(self, mut builder: WordLevelBuilder) -> WordLevelBuilder {
if let Some(token) = self.unk_token {
builder = builder.unk_token(token);
}
builder
}
}
#[napi]
pub struct WordLevel {}
#[napi]
impl WordLevel {
#[napi(factory, ts_return_type = "Model")]
pub fn init(vocab: Vocab, options: Option<WordLevelOptions>) -> Result<Model> {
let options = options.unwrap_or_default();
let mut builder = tk::models::wordlevel::WordLevel::builder().vocab(vocab);
builder = options.apply_to_wordlevel_builder(builder);
let model = builder
.build()
.map_err(|e| Error::from_reason(e.to_string()))?;
Ok(Model {
model: Some(Arc::new(RwLock::new(model.into()))),
})
}
#[napi(factory)]
pub fn empty() -> Model {
let wordlevel = tk::models::wordlevel::WordLevel::default();
Model {
model: Some(Arc::new(RwLock::new(wordlevel.into()))),
}
}
#[napi(ts_return_type = "Promise<Model>")]
pub fn from_file(
vocab: String,
options: Option<WordLevelOptions>,
) -> AsyncTask<WordLevelFromFilesTask> {
let options = options.unwrap_or_default();
let mut builder = tk::models::wordlevel::WordLevel::builder().files(vocab);
builder = options.apply_to_wordlevel_builder(builder);
AsyncTask::new(WordLevelFromFilesTask {
builder: Some(builder),
})
}
}
#[derive(Default)]
#[napi(object)]
pub struct UnigramOptions {
pub unk_id: Option<u32>,
pub byte_fallback: Option<bool>,
}
#[napi]
pub struct Unigram {}
#[napi]
impl Unigram {
#[napi(factory, ts_return_type = "Model")]
pub fn init(vocab: Vec<(String, f64)>, options: Option<UnigramOptions>) -> Result<Model> {
let options = options.unwrap_or_default();
let unigram = tk::models::unigram::Unigram::from(
vocab,
options.unk_id.map(|u| u as usize),
options.byte_fallback.unwrap_or(false),
)
.map_err(|e| Error::from_reason(e.to_string()))?;
Ok(Model {
model: Some(Arc::new(RwLock::new(unigram.into()))),
})
}
#[napi(factory, ts_return_type = "Model")]
pub fn empty() -> Model {
let unigram = tk::models::unigram::Unigram::default();
Model {
model: Some(Arc::new(RwLock::new(unigram.into()))),
}
}
}
|
tokenizers/bindings/node/src/models.rs/0
|
{
"file_path": "tokenizers/bindings/node/src/models.rs",
"repo_id": "tokenizers",
"token_count": 3681
}
| 252
|
[package]
name = "tokenizers-python"
version = "0.20.0-dev.0"
authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
edition = "2021"
[lib]
name = "tokenizers"
crate-type = ["cdylib"]
[dependencies]
rayon = "1.10"
serde = { version = "1.0", features = [ "rc", "derive" ]}
serde_json = "1.0"
libc = "0.2"
env_logger = "0.11"
pyo3 = { version = "0.21" }
numpy = "0.21"
ndarray = "0.15"
itertools = "0.12"
[dependencies.tokenizers]
path = "../../tokenizers"
[dev-dependencies]
tempfile = "3.10"
pyo3 = { version = "0.21", features = ["auto-initialize"] }
[features]
default = ["pyo3/extension-module"]
|
tokenizers/bindings/python/Cargo.toml/0
|
{
"file_path": "tokenizers/bindings/python/Cargo.toml",
"repo_id": "tokenizers",
"token_count": 266
}
| 253
|
from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
|
tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py/0
|
{
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py",
"repo_id": "tokenizers",
"token_count": 94
}
| 254
|
.tokenized-text {
width:100%;
padding:2rem;
max-height: 400px;
overflow-y: auto;
box-sizing:border-box;
line-height:4rem; /* Lots of space between lines */
font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
background-color: rgba(0,0,0,0.01);
letter-spacing:2px; /* Give some extra separation between chars */
}
.non-token{
/* White space and other things the tokenizer ignores*/
white-space: pre;
letter-spacing:4px;
    border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious*/
border-bottom:1px solid #A0A0A0;
line-height: 1rem;
height: calc(100% - 2px);
}
.token {
white-space: pre;
position:relative;
color:black;
letter-spacing:2px;
}
.annotation{
white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */
border-radius:4px;
position:relative;
width:fit-content;
}
.annotation:before {
/*The before holds the text and the after holds the background*/
z-index:1000; /* Make sure this is above the background */
content:attr(data-label); /* The annotations label is on a data attribute */
color:white;
position:absolute;
font-size:1rem;
text-align:center;
font-weight:bold;
top:1.75rem;
line-height:0;
left:0;
width:100%;
padding:0.5rem 0;
/* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
overflow: hidden;
white-space: nowrap;
text-overflow:ellipsis;
}
.annotation:after {
content:attr(data-label); /* The content defines the width of the annotation*/
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
padding:0.5rem 0;
    /* Nasty hack below:
We set the annotations color in code because we don't know the colors at css time.
But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
So to get around that, annotations have the color set on them with a style attribute and then we
can get the color with currentColor.
Annotations wrap tokens and tokens set the color back to black
*/
background-color: currentColor;
}
.annotation:hover::after, .annotation:hover::before{
/* When the user hovers over an annotation expand the label to display in full
*/
min-width: fit-content;
}
.annotation:hover{
/* Emphasize the annotation start end with a border on hover*/
border-color: currentColor;
border: 2px solid;
}
.special-token:not(:empty){
/*
A none empty special token is like UNK (as opposed to CLS which has no representation in the text )
*/
position:relative;
}
.special-token:empty::before{
    /* Special tokens that don't have text are displayed as pseudo elements so we don't select them with the mouse*/
content:attr(data-stok);
background:#202020;
font-size:0.75rem;
color:white;
margin: 0 0.25rem;
padding: 0.25rem;
border-radius:4px
}
.special-token:not(:empty):before {
/* Special tokens that have text (UNK) are displayed above the actual text*/
content:attr(data-stok);
position:absolute;
bottom:1.75rem;
min-width:100%;
width:100%;
height:1rem;
line-height:1rem;
font-size:1rem;
text-align:center;
color:white;
font-weight:bold;
background:#202020;
border-radius:10%;
}
/*
We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations
instead we apply even and odd class at generation time and color them that way
*/
.even-token{
background:#DCDCDC ;
border: 1px solid #DCDCDC;
}
.odd-token{
background:#A0A0A0;
border: 1px solid #A0A0A0;
}
.even-token.multi-token,.odd-token.multi-token{
background: repeating-linear-gradient(
45deg,
transparent,
transparent 1px,
#ccc 1px,
#ccc 1px
),
/* on "bottom" */
linear-gradient(
to bottom,
#FFB6C1,
#999
);
}
.multi-token:hover::after {
content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
color:white;
background-color: black;
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
    width:fit-content; /* Only as wide as the tooltip's own text */
padding:0.5rem 0;
}
|
tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css/0
|
{
"file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css",
"repo_id": "tokenizers",
"token_count": 1806
}
| 255
|
use std::sync::{Arc, RwLock};
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use serde::ser::SerializeStruct;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tk::normalizer::SplitDelimiterBehavior;
use tk::pre_tokenizers::bert::BertPreTokenizer;
use tk::pre_tokenizers::byte_level::ByteLevel;
use tk::pre_tokenizers::delimiter::CharDelimiterSplit;
use tk::pre_tokenizers::digits::Digits;
use tk::pre_tokenizers::metaspace::{Metaspace, PrependScheme};
use tk::pre_tokenizers::punctuation::Punctuation;
use tk::pre_tokenizers::split::Split;
use tk::pre_tokenizers::unicode_scripts::UnicodeScripts;
use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use tk::pre_tokenizers::PreTokenizerWrapper;
use tk::tokenizer::Offsets;
use tk::{PreTokenizedString, PreTokenizer};
use tokenizers as tk;
use super::error::ToPyResult;
use super::utils::*;
/// Base class for all pre-tokenizers
///
/// This class is not supposed to be instantiated directly. Instead, any implementation of a
/// PreTokenizer will return an instance of this class when instantiated.
#[pyclass(
dict,
module = "tokenizers.pre_tokenizers",
name = "PreTokenizer",
subclass
)]
#[derive(Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct PyPreTokenizer {
pub(crate) pretok: PyPreTokenizerTypeWrapper,
}
impl PyPreTokenizer {
#[allow(dead_code)]
pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self {
PyPreTokenizer { pretok }
}
pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
let base = self.clone();
Ok(match &self.pretok {
PyPreTokenizerTypeWrapper::Sequence(_) => {
Py::new(py, (PySequence {}, base))?.into_py(py)
}
PyPreTokenizerTypeWrapper::Single(ref inner) => {
match &*inner.as_ref().read().unwrap() {
PyPreTokenizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py),
PyPreTokenizerWrapper::Wrapped(inner) => match inner {
PreTokenizerWrapper::Whitespace(_) => {
Py::new(py, (PyWhitespace {}, base))?.into_py(py)
}
PreTokenizerWrapper::Split(_) => {
Py::new(py, (PySplit {}, base))?.into_py(py)
}
PreTokenizerWrapper::Punctuation(_) => {
Py::new(py, (PyPunctuation {}, base))?.into_py(py)
}
PreTokenizerWrapper::Sequence(_) => {
Py::new(py, (PySequence {}, base))?.into_py(py)
}
PreTokenizerWrapper::Metaspace(_) => {
Py::new(py, (PyMetaspace {}, base))?.into_py(py)
}
PreTokenizerWrapper::Delimiter(_) => {
Py::new(py, (PyCharDelimiterSplit {}, base))?.into_py(py)
}
PreTokenizerWrapper::WhitespaceSplit(_) => {
Py::new(py, (PyWhitespaceSplit {}, base))?.into_py(py)
}
PreTokenizerWrapper::ByteLevel(_) => {
Py::new(py, (PyByteLevel {}, base))?.into_py(py)
}
PreTokenizerWrapper::BertPreTokenizer(_) => {
Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py)
}
PreTokenizerWrapper::Digits(_) => {
Py::new(py, (PyDigits {}, base))?.into_py(py)
}
PreTokenizerWrapper::UnicodeScripts(_) => {
Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py)
}
},
}
}
})
}
}
impl PreTokenizer for PyPreTokenizer {
fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> {
self.pretok.pre_tokenize(normalized)
}
}
#[pymethods]
impl PyPreTokenizer {
#[staticmethod]
fn custom(pretok: PyObject) -> Self {
PyPreTokenizer {
pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(),
}
}
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.pretok).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle PreTokenizer: {}",
e
))
})?;
Ok(PyBytes::new_bound(py, data.as_bytes()).to_object(py))
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&PyBytes>(py) {
Ok(s) => {
let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle PreTokenizer: {}",
e
))
})?;
self.pretok = unpickled;
Ok(())
}
Err(e) => Err(e),
}
}
/// Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
///
/// This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
/// keep track of the pre-tokenization, and leverage the capabilities of the
/// :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
/// the pre-tokenization of a raw string, you can use
/// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
///
/// Args:
/// pretok (:class:`~tokenizers.PreTokenizedString):
/// The pre-tokenized string on which to apply this
/// :class:`~tokenizers.pre_tokenizers.PreTokenizer`
#[pyo3(text_signature = "(self, pretok)")]
fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> {
ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into()
}
/// Pre tokenize the given string
///
/// This method provides a way to visualize the effect of a
/// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
/// alignment, nor does it provide all the capabilities of the
/// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
/// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
///
/// Args:
/// sequence (:obj:`str`):
    /// A string to pre-tokenize
///
/// Returns:
/// :obj:`List[Tuple[str, Offsets]]`:
/// A list of tuple with the pre-tokenized parts and their offsets
#[pyo3(text_signature = "(self, sequence)")]
fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> {
let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s);
ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?;
Ok(pretokenized
.get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char)
.into_iter()
.map(|(s, o, _)| (s.to_owned(), o))
.collect())
}
fn __repr__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::repr(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
fn __str__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::to_string(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
}
macro_rules! getter {
($self: ident, $variant: ident, $($name: tt)+) => {{
let super_ = $self.as_ref();
if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok {
if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) =
*single.read().unwrap() {
pretok.$($name)+
} else {
unreachable!()
}
} else {
unreachable!()
}
}};
}
macro_rules! setter {
($self: ident, $variant: ident, $name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok {
if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) =
*single.write().unwrap()
{
pretok.$name = $value;
}
}
}};
($self: ident, $variant: ident, @$name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok {
if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) =
*single.write().unwrap()
{
pretok.$name($value);
}
}
}};
}
/// ByteLevel PreTokenizer
///
/// This pre-tokenizer takes care of replacing all bytes of the given string
/// with a corresponding representation, as well as splitting into words.
///
/// Args:
/// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Whether to add a space to the first word if there isn't already one. This
/// lets us treat `hello` exactly like `say hello`.
/// use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Set this to :obj:`False` to prevent this `pre_tokenizer` from using
/// the GPT2 specific regexp for splitting on whitespace.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")]
pub struct PyByteLevel {}
#[pymethods]
impl PyByteLevel {
#[getter]
fn get_add_prefix_space(self_: PyRef<Self>) -> bool {
getter!(self_, ByteLevel, add_prefix_space)
}
#[setter]
fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) {
setter!(self_, ByteLevel, add_prefix_space, add_prefix_space);
}
#[getter]
fn get_use_regex(self_: PyRef<Self>) -> bool {
getter!(self_, ByteLevel, use_regex)
}
#[setter]
fn set_use_regex(self_: PyRef<Self>, use_regex: bool) {
setter!(self_, ByteLevel, use_regex, use_regex);
}
#[new]
#[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs), text_signature = "(self, add_prefix_space=True, use_regex=True)")]
fn new(
add_prefix_space: bool,
use_regex: bool,
_kwargs: Option<&Bound<'_, PyDict>>,
) -> (Self, PyPreTokenizer) {
(
PyByteLevel {},
ByteLevel::default()
.add_prefix_space(add_prefix_space)
.use_regex(use_regex)
.into(),
)
}
/// Returns the alphabet used by this PreTokenizer.
///
/// Since the ByteLevel works as its name suggests, at the byte level, it
/// encodes each byte value to a unique visible character. This means that there is a
/// total of 256 different characters composing this alphabet.
///
/// Returns:
/// :obj:`List[str]`: A list of characters that compose the alphabet
#[staticmethod]
#[pyo3(text_signature = "()")]
fn alphabet() -> Vec<String> {
ByteLevel::alphabet()
.into_iter()
.map(|c| c.to_string())
.collect()
}
}
/// This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")]
pub struct PyWhitespace {}
#[pymethods]
impl PyWhitespace {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyPreTokenizer) {
(PyWhitespace {}, Whitespace {}.into())
}
}
/// This pre-tokenizer simply splits on the whitespace. Works like `.split()`
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")]
pub struct PyWhitespaceSplit {}
#[pymethods]
impl PyWhitespaceSplit {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyPreTokenizer) {
(PyWhitespaceSplit {}, WhitespaceSplit.into())
}
}
/// Split PreTokenizer
///
/// This versatile pre-tokenizer splits using the provided pattern and
/// according to the provided behavior. The pattern can be inverted by
/// making use of the invert flag.
///
/// Args:
/// pattern (:obj:`str` or :class:`~tokenizers.Regex`):
/// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`.
/// If you want to use a regex pattern, it has to be wrapped in a `tokenizers.Regex`;
/// otherwise we consider it as a string pattern. For example `pattern="|"`
/// means you want to split on `|` (imagine a csv file for example), while
/// `pattern=tokenizers.Regex("1|2")` means you split on either '1' or '2'.
/// behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
/// The behavior to use when splitting.
/// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
/// "contiguous"
///
/// invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
/// Whether to invert the pattern.
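///
/// A usage sketch from the Python side (assuming the names exposed by these bindings)::
///
///     from tokenizers import Regex
///     from tokenizers.pre_tokenizers import Split
///
///     # Split on a literal "|" and drop the delimiter
///     pre = Split(pattern="|", behavior="removed")
///     # Split on the regex 1|2, keeping each match as its own token
///     pre = Split(pattern=Regex("1|2"), behavior="isolated")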
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")]
pub struct PySplit {}
#[pymethods]
impl PySplit {
#[new]
#[pyo3(signature = (pattern, behavior, invert = false), text_signature = "(self, pattern, behavior, invert=False)")]
fn new(
pattern: PyPattern,
behavior: PySplitDelimiterBehavior,
invert: bool,
) -> PyResult<(Self, PyPreTokenizer)> {
Ok((
PySplit {},
ToPyResult(Split::new(pattern, behavior.into(), invert))
.into_py()?
.into(),
))
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> Bound<'p, PyTuple> {
PyTuple::new_bound(py, [" ", "removed"])
}
}
/// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
///
/// Args:
/// delimiter: str:
/// The delimiter char that will be used to split input
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")]
pub struct PyCharDelimiterSplit {}
#[pymethods]
impl PyCharDelimiterSplit {
#[getter]
fn get_delimiter(self_: PyRef<Self>) -> String {
getter!(self_, Delimiter, delimiter.to_string())
}
#[setter]
fn set_delimiter(self_: PyRef<Self>, delimiter: char) {
setter!(self_, Delimiter, delimiter, delimiter);
}
#[new]
#[pyo3(text_signature = None)]
pub fn new(delimiter: char) -> PyResult<(Self, PyPreTokenizer)> {
Ok((
PyCharDelimiterSplit {},
CharDelimiterSplit::new(delimiter).into(),
))
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> Bound<'p, PyTuple> {
PyTuple::new_bound(py, [" "])
}
}
/// BertPreTokenizer
///
/// This pre-tokenizer splits tokens on spaces, and also on punctuation.
/// Each occurrence of a punctuation character will be treated separately.
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")]
pub struct PyBertPreTokenizer {}
#[pymethods]
impl PyBertPreTokenizer {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyPreTokenizer) {
(PyBertPreTokenizer {}, BertPreTokenizer.into())
}
}
/// This pre-tokenizer simply splits on punctuation as individual characters.
///
/// Args:
/// behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
/// The behavior to use when splitting.
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
/// "contiguous"
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")]
pub struct PyPunctuation {}
#[pymethods]
impl PyPunctuation {
#[new]
#[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)), text_signature = "(self, behavior=\"isolated\")")]
fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) {
(PyPunctuation {}, Punctuation::new(behavior.into()).into())
}
}
/// This pre-tokenizer composes other pre_tokenizers and applies them in sequence
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")]
pub struct PySequence {}
#[pymethods]
impl PySequence {
#[new]
#[pyo3(text_signature = "(self, pretokenizers)")]
fn new(pre_tokenizers: &Bound<'_, PyList>) -> PyResult<(Self, PyPreTokenizer)> {
let mut sequence = Vec::with_capacity(pre_tokenizers.len());
for n in pre_tokenizers.iter() {
let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?;
match &pretokenizer.pretok {
PyPreTokenizerTypeWrapper::Sequence(inner) => {
sequence.extend(inner.iter().cloned())
}
PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()),
}
}
Ok((
PySequence {},
PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)),
))
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> Bound<'p, PyTuple> {
PyTuple::new_bound(py, [PyList::empty_bound(py)])
}
fn __getitem__(self_: PyRef<'_, Self>, py: Python<'_>, index: usize) -> PyResult<Py<PyAny>> {
match &self_.as_ref().pretok {
PyPreTokenizerTypeWrapper::Sequence(inner) => match inner.get(index) {
Some(item) => {
PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Single(Arc::clone(item)))
.get_as_subtype(py)
}
_ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>(
"Index not found",
)),
},
PyPreTokenizerTypeWrapper::Single(inner) => {
PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Single(Arc::clone(inner)))
.get_as_subtype(py)
}
}
}
}
pub(crate) fn from_string(string: String) -> Result<PrependScheme, PyErr> {
let scheme = match string.as_str() {
"first" => PrependScheme::First,
"never" => PrependScheme::Never,
"always" => PrependScheme::Always,
_ => {
return Err(exceptions::PyValueError::new_err(format!(
"{} is an unknown variant, should be one of ['first', 'never', 'always']",
string
)));
}
};
Ok(scheme)
}
/// Metaspace pre-tokenizer
///
/// This pre-tokenizer replaces any whitespace by the provided replacement character.
/// It then tries to split on these spaces.
///
/// Args:
///     replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
///         The replacement character. Must be exactly one character. By default we
///         use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
///
/// prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
/// Whether to add a space to the first word if there isn't already one. This
/// lets us treat `hello` exactly like `say hello`.
/// Choices: "always", "never", "first". First means the space is only added on the first
/// token (relevant when special tokens are used or other pre_tokenizer are used).
///
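/// Example (an illustrative sketch, with the default settings):
///     >>> from tokenizers.pre_tokenizers import Metaspace
///     >>> [s for s, _ in Metaspace().pre_tokenize_str("say hello")]
///     ['▁say', '▁hello']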
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")]
pub struct PyMetaspace {}
#[pymethods]
impl PyMetaspace {
#[getter]
fn get_replacement(self_: PyRef<Self>) -> String {
getter!(self_, Metaspace, get_replacement().to_string())
}
#[setter]
fn set_replacement(self_: PyRef<Self>, replacement: char) {
setter!(self_, Metaspace, @set_replacement, replacement);
}
#[getter]
fn get_split(self_: PyRef<Self>) -> bool {
getter!(self_, Metaspace, get_split())
}
#[setter]
fn set_split(self_: PyRef<Self>, split: bool) {
setter!(self_, Metaspace, @set_split, split);
}
#[getter]
fn get_prepend_scheme(self_: PyRef<Self>) -> String {
// Assuming Metaspace has a method to get the prepend_scheme as a string
let scheme: PrependScheme = getter!(self_, Metaspace, get_prepend_scheme());
match scheme {
PrependScheme::First => "first",
PrependScheme::Never => "never",
PrependScheme::Always => "always",
}
.to_string()
}
#[setter]
fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> {
let scheme = from_string(prepend_scheme)?;
setter!(self_, Metaspace, @set_prepend_scheme, scheme);
Ok(())
}
#[new]
    #[pyo3(signature = (replacement = '▁', prepend_scheme=String::from("always"), split=true), text_signature = "(self, replacement=\"_\", prepend_scheme=\"always\", split=True)")]
fn new(
replacement: char,
prepend_scheme: String,
split: bool,
) -> PyResult<(Self, PyPreTokenizer)> {
// Create a new Metaspace instance
let prepend_scheme = from_string(prepend_scheme)?;
let new_instance: Metaspace = Metaspace::new(replacement, prepend_scheme, split);
Ok((PyMetaspace {}, new_instance.into()))
}
}
/// This pre-tokenizer simply splits using the digits in separate tokens
///
/// Args:
/// individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
/// If set to True, digits will each be separated as follows::
///
/// "Call 123 please" -> "Call ", "1", "2", "3", " please"
///
///         If set to False, digits will be grouped as follows::
///
/// "Call 123 please" -> "Call ", "123", " please"
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Digits")]
pub struct PyDigits {}
#[pymethods]
impl PyDigits {
#[getter]
fn get_individual_digits(self_: PyRef<Self>) -> bool {
getter!(self_, Digits, individual_digits)
}
#[setter]
fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) {
setter!(self_, Digits, individual_digits, individual_digits);
}
#[new]
#[pyo3(signature = (individual_digits = false), text_signature = "(self, individual_digits=False)")]
fn new(individual_digits: bool) -> (Self, PyPreTokenizer) {
(PyDigits {}, Digits::new(individual_digits).into())
}
}
/// This pre-tokenizer splits on characters that belong to different language families.
/// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
/// Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
/// This mimics the SentencePiece Unigram implementation.
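///
/// Example (illustrative; the trailing space stays with the preceding script):
///     >>> from tokenizers.pre_tokenizers import UnicodeScripts
///     >>> [s for s, _ in UnicodeScripts().pre_tokenize_str("Hello こんにちは")]
///     ['Hello ', 'こんにちは']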
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")]
pub struct PyUnicodeScripts {}
#[pymethods]
impl PyUnicodeScripts {
#[new]
#[pyo3(text_signature = "(self)")]
fn new() -> (Self, PyPreTokenizer) {
(PyUnicodeScripts {}, UnicodeScripts::new().into())
}
}
#[derive(Clone)]
pub(crate) struct CustomPreTokenizer {
inner: PyObject,
}
impl CustomPreTokenizer {
pub fn new(inner: PyObject) -> Self {
Self { inner }
}
}
impl tk::tokenizer::PreTokenizer for CustomPreTokenizer {
fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> {
Python::with_gil(|py| {
let pretok = PyPreTokenizedStringRefMut::new(sentence);
let py_pretok = self.inner.bind(py);
py_pretok.call_method("pre_tokenize", (pretok.get(),), None)?;
Ok(())
})
}
}
impl Serialize for CustomPreTokenizer {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Err(serde::ser::Error::custom(
"Custom PreTokenizer cannot be serialized",
))
}
}
impl<'de> Deserialize<'de> for CustomPreTokenizer {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Err(serde::de::Error::custom(
"Custom PreTokenizer cannot be deserialized",
))
}
}
#[derive(Clone, Deserialize)]
#[serde(untagged)]
pub(crate) enum PyPreTokenizerWrapper {
Custom(CustomPreTokenizer),
Wrapped(PreTokenizerWrapper),
}
impl Serialize for PyPreTokenizerWrapper {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
S: Serializer,
{
match self {
PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer),
PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer),
}
}
}
#[derive(Clone, Deserialize)]
#[serde(untagged)]
pub(crate) enum PyPreTokenizerTypeWrapper {
Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>),
Single(Arc<RwLock<PyPreTokenizerWrapper>>),
}
impl Serialize for PyPreTokenizerTypeWrapper {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
PyPreTokenizerTypeWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("pretokenizers", seq)?;
ser.end()
}
PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer),
}
}
}
impl<I> From<I> for PyPreTokenizerWrapper
where
I: Into<PreTokenizerWrapper>,
{
fn from(pretok: I) -> Self {
PyPreTokenizerWrapper::Wrapped(pretok.into())
}
}
impl<I> From<I> for PyPreTokenizerTypeWrapper
where
I: Into<PyPreTokenizerWrapper>,
{
fn from(pretok: I) -> Self {
PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok.into())))
}
}
impl<I> From<I> for PyPreTokenizer
where
I: Into<PreTokenizerWrapper>,
{
fn from(pretok: I) -> Self {
PyPreTokenizer {
pretok: pretok.into().into(),
}
}
}
impl PreTokenizer for PyPreTokenizerTypeWrapper {
fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> {
match self {
PyPreTokenizerTypeWrapper::Single(inner) => inner.read().unwrap().pre_tokenize(pretok),
PyPreTokenizerTypeWrapper::Sequence(inner) => inner
.iter()
.try_for_each(|n| n.read().unwrap().pre_tokenize(pretok)),
}
}
}
impl PreTokenizer for PyPreTokenizerWrapper {
fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> {
match self {
PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok),
PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok),
}
}
}
/// PreTokenizers Module
#[pymodule]
pub fn pre_tokenizers(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_class::<PyPreTokenizer>()?;
m.add_class::<PyByteLevel>()?;
m.add_class::<PyWhitespace>()?;
m.add_class::<PyWhitespaceSplit>()?;
m.add_class::<PySplit>()?;
m.add_class::<PyBertPreTokenizer>()?;
m.add_class::<PyMetaspace>()?;
m.add_class::<PyCharDelimiterSplit>()?;
m.add_class::<PyPunctuation>()?;
m.add_class::<PySequence>()?;
m.add_class::<PyDigits>()?;
m.add_class::<PyUnicodeScripts>()?;
Ok(())
}
#[cfg(test)]
mod test {
use pyo3::prelude::*;
use tk::pre_tokenizers::sequence::Sequence;
use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use tk::pre_tokenizers::PreTokenizerWrapper;
use crate::pre_tokenizers::{
CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper,
};
#[test]
fn get_subtype() {
Python::with_gil(|py| {
let py_norm = PyPreTokenizer::new(Whitespace {}.into());
let py_wsp = py_norm.get_as_subtype(py).unwrap();
assert_eq!("Whitespace", py_wsp.bind(py).get_type().qualname().unwrap());
})
}
#[test]
fn serialize() {
let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into();
let py_ser = serde_json::to_string(&py_wrapped).unwrap();
let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {});
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(py_ser, rs_ser);
let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap();
match py_pretok.pretok {
PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() {
PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {}
_ => panic!("Expected Whitespace"),
},
_ => panic!("Expected wrapped, not custom."),
}
let py_seq: PyPreTokenizerWrapper =
Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into();
let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap();
let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![
Whitespace {}.into(),
WhitespaceSplit.into(),
]));
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(py_wrapper_ser, rs_ser);
let py_seq = PyPreTokenizer::new(py_seq.into());
let py_ser = serde_json::to_string(&py_seq).unwrap();
assert_eq!(py_wrapper_ser, py_ser);
let obj = Python::with_gil(|py| {
let py_wsp = PyPreTokenizer::new(Whitespace {}.into());
let obj: PyObject = Py::new(py, py_wsp).unwrap().into_py(py);
obj
});
let py_seq: PyPreTokenizerWrapper =
PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj));
assert!(serde_json::to_string(&py_seq).is_err());
}
}
|
tokenizers/bindings/python/src/pre_tokenizers.rs/0
|
{
"file_path": "tokenizers/bindings/python/src/pre_tokenizers.rs",
"repo_id": "tokenizers",
"token_count": 13648
}
| 256
|
import pytest
from tokenizers import BertWordPieceTokenizer
from ..utils import bert_files, data_dir
class TestEncoding:
@pytest.fixture(scope="class")
def encodings(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
single_encoding = tokenizer.encode("I love HuggingFace")
pair_encoding = tokenizer.encode("I love HuggingFace", "Do you?")
return single_encoding, pair_encoding
def test_sequence_ids(self, encodings):
single, pair = encodings
assert single.sequence_ids == [None, 0, 0, 0, 0, None]
assert pair.sequence_ids == [None, 0, 0, 0, 0, None, 1, 1, 1, None]
def test_n_sequences(self, encodings):
single, pair = encodings
assert single.n_sequences == 1
assert pair.n_sequences == 2
def test_word_to_tokens(self, encodings):
single, pair = encodings
assert single.tokens == ["[CLS]", "i", "love", "hugging", "##face", "[SEP]"]
assert single.word_to_tokens(0) == (1, 2)
assert pair.tokens == [
"[CLS]",
"i",
"love",
"hugging",
"##face",
"[SEP]",
"do",
"you",
"?",
"[SEP]",
]
assert pair.word_to_tokens(0) == (1, 2)
assert pair.word_to_tokens(0, 0) == (1, 2)
assert pair.word_to_tokens(6, 0) == None
assert pair.word_to_tokens(0, 1) == (6, 7)
def test_word_to_chars(self, encodings):
single, pair = encodings
assert single.word_to_chars(2) == (7, 18)
assert pair.word_to_chars(2) == (7, 18)
assert pair.word_to_chars(2, 0) == (7, 18)
assert pair.word_to_chars(2, 1) == (6, 7)
def test_token_to_sequence(self, encodings):
single, pair = encodings
assert single.token_to_sequence(2) == 0
assert pair.token_to_sequence(2) == 0
assert pair.token_to_sequence(0) == None
assert pair.token_to_sequence(5) == None
assert pair.token_to_sequence(6) == 1
assert pair.token_to_sequence(8) == 1
assert pair.token_to_sequence(9) == None
assert pair.token_to_sequence(1200) == None
def test_token_to_chars(self, encodings):
single, pair = encodings
assert single.token_to_chars(0) == None
assert single.token_to_chars(2) == (2, 6)
assert pair.token_to_chars(2) == (2, 6)
assert pair.token_to_chars(5) == None
assert pair.token_to_chars(6) == (0, 2)
def test_token_to_word(self, encodings):
single, pair = encodings
assert single.token_to_word(0) == None
assert single.token_to_word(1) == 0
assert single.token_to_word(4) == 2
assert pair.token_to_word(1) == 0
assert pair.token_to_word(4) == 2
assert pair.token_to_word(5) == None
assert pair.token_to_word(6) == 0
assert pair.token_to_word(7) == 1
def test_char_to_token(self, encodings):
single, pair = encodings
assert single.char_to_token(0) == 1
assert pair.char_to_token(0) == 1
assert pair.char_to_token(0, 0) == 1
assert pair.char_to_token(1, 0) == None
assert pair.char_to_token(0, 1) == 6
assert pair.char_to_token(2, 1) == None
def test_char_to_word(self, encodings):
single, pair = encodings
assert single.char_to_word(0) == 0
assert single.char_to_word(1) == None
assert pair.char_to_word(2) == 1
assert pair.char_to_word(2, 0) == 1
assert pair.char_to_word(2, 1) == None
assert pair.char_to_word(3, 1) == 1
def test_truncation(self, encodings):
single, _ = encodings
single.truncate(2, 1, "right")
assert single.tokens == ["[CLS]", "i"]
assert single.overflowing[0].tokens == ["i", "love"]
def test_invalid_truncate_direction(self, encodings):
single, _ = encodings
with pytest.raises(ValueError) as excinfo:
single.truncate(2, 1, "not_a_direction")
assert "Invalid truncation direction value : not_a_direction" == str(excinfo.value)
|
tokenizers/bindings/python/tests/bindings/test_encoding.py/0
|
{
"file_path": "tokenizers/bindings/python/tests/bindings/test_encoding.py",
"repo_id": "tokenizers",
"token_count": 1991
}
| 257
|
import pytest
from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer
class TestSentencePieceBPE:
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁sentence"]
class TestSentencePieceUnigram:
def test_train(self, tmpdir):
p = tmpdir.mkdir("tmpdir").join("file.txt")
p.write("A first sentence\nAnother sentence\nAnd a last one")
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=str(p), show_progress=False)
output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]
        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"
def test_train_with_unk_token(self, tmpdir):
p = tmpdir.mkdir("tmpdir").join("file.txt")
p.write("A first sentence\nAnother sentence\nAnd a last one")
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>")
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]
        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"
def test_train_from_iterator_with_unk_token(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
)
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
|
tokenizers/bindings/python/tests/implementations/test_sentencepiece.py/0
|
{
"file_path": "tokenizers/bindings/python/tests/implementations/test_sentencepiece.py",
"repo_id": "tokenizers",
"token_count": 1118
}
| 258
|
# Trainers
<tokenizerslangcontent>
<python>
## BpeTrainer
[[autodoc]] tokenizers.trainers.BpeTrainer
## UnigramTrainer
[[autodoc]] tokenizers.trainers.UnigramTrainer
## WordLevelTrainer
[[autodoc]] tokenizers.trainers.WordLevelTrainer
## WordPieceTrainer
[[autodoc]] tokenizers.trainers.WordPieceTrainer
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
|
tokenizers/docs/source-doc-builder/api/trainers.mdx/0
|
{
"file_path": "tokenizers/docs/source-doc-builder/api/trainers.mdx",
"repo_id": "tokenizers",
"token_count": 183
}
| 259
|
/* Our DOM objects */
/* Version control */
.selectors {
margin-bottom: 10px;
}
.dropdown-button {
display: inline-block;
width: 50%;
background-color: #6670FF;
color: white;
border: none;
padding: 5px;
font-size: 15px;
cursor: pointer;
}
.dropdown-button:hover, .dropdown-button:focus, .dropdown-button.active {
background-color: #A6B0FF;
}
.dropdown-button.active {
background-color: #7988FF;
}
.menu-dropdown {
display: none;
background-color: #7988FF;
min-width: 160px;
overflow: auto;
font-size: 15px;
padding: 10px 0;
}
.menu-dropdown a {
color: white;
padding: 3px 4px;
text-decoration: none;
display: block;
}
.menu-dropdown a:hover {
background-color: #A6B0FF;
}
.dropdown-link.active {
background-color: #A6B0FF;
}
.show {
display: block;
}
/* The literal code blocks */
.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
color: #6670FF;
}
/* To keep the logo centered */
.wy-side-scroll {
width: auto;
font-size: 20px;
}
/* The div that holds the Hugging Face logo */
.HuggingFaceDiv {
    width: 100%;
}
/* The research field on top of the toc tree */
.wy-side-nav-search{
padding-top: 0;
background-color: #6670FF;
}
/* The toc tree */
.wy-nav-side{
background-color: #6670FF;
padding-bottom: 0;
}
/* The section headers in the toc tree */
.wy-menu-vertical p.caption{
background-color: #4d59ff;
line-height: 40px;
}
/* The selected items in the toc tree */
.wy-menu-vertical li.current{
background-color: #A6B0FF;
}
/* When a list item that does belong to the selected block from the toc tree is hovered */
.wy-menu-vertical li.current a:hover{
background-color: #B6C0FF;
}
/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */
.wy-menu-vertical li a:hover{
background-color: #A7AFFB;
}
/* The text items on the toc tree */
.wy-menu-vertical a {
color: #FFFFDD;
font-family: Calibre-Light, sans-serif;
}
.wy-menu-vertical header, .wy-menu-vertical p.caption{
color: white;
font-family: Calibre-Light, sans-serif;
}
/* The color inside the selected toc tree block */
.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {
color: black;
}
/* Inside the depth-2 selected toc tree block */
.wy-menu-vertical li.toctree-l2.current>a {
background-color: #B6C0FF
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
background-color: #C6D0FF
}
/* Inside the depth-3 selected toc tree block */
.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{
background-color: #D6E0FF
}
/* Inside code snippets */
.rst-content dl:not(.docutils) dt{
font-size: 15px;
}
/* Links */
a {
color: #6670FF;
}
/* Content bars */
.rst-content dl:not(.docutils) dt {
background-color: rgba(251, 141, 104, 0.1);
border-right: solid 2px #FB8D68;
border-left: solid 2px #FB8D68;
color: #FB8D68;
font-family: Calibre-Light, sans-serif;
border-top: none;
font-style: normal !important;
}
/* Expand button */
.wy-menu-vertical li.toctree-l2 span.toctree-expand,
.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,
.wy-menu-vertical li.toctree-l3 span.toctree-expand{
color: black;
}
/* Max window size */
.wy-nav-content{
max-width: 1200px;
}
/* Mobile header */
.wy-nav-top{
background-color: #6670FF;
}
/* Source spans */
.rst-content .viewcode-link, .rst-content .viewcode-back{
color: #6670FF;
font-size: 110%;
letter-spacing: 2px;
text-transform: uppercase;
}
/* It would be better for table to be visible without horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th{
white-space: normal;
}
.footer {
margin-top: 20px;
}
.footer__Social {
display: flex;
flex-direction: row;
}
.footer__CustomImage {
margin: 2px 5px 0 0;
}
/* class and method names in doc */
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
font-family: Calibre, sans-serif;
font-size: 20px !important;
}
/* class name in doc*/
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
margin-right: 10px;
font-family: Calibre-Medium, sans-serif;
}
/* Method and class parameters */
.sig-param{
line-height: 23px;
}
/* Class introduction "class" string at beginning */
.rst-content dl:not(.docutils) .property{
font-size: 18px;
color: black;
}
/* FONTS */
body{
font-family: Calibre, sans-serif;
font-size: 16px;
}
h1 {
font-family: Calibre-Thin, sans-serif;
font-size: 70px;
}
h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
font-family: Calibre-Medium, sans-serif;
}
@font-face {
font-family: Calibre-Medium;
src: url(./Calibre-Medium.otf);
font-weight:400;
}
@font-face {
font-family: Calibre;
src: url(./Calibre-Regular.otf);
font-weight:400;
}
@font-face {
font-family: Calibre-Light;
src: url(./Calibre-Light.ttf);
font-weight:400;
}
@font-face {
font-family: Calibre-Thin;
src: url(./Calibre-Thin.otf);
font-weight:400;
}
/**
* Nav Links to other parts of huggingface.co
*/
div.hf-menu {
position: absolute;
top: 0;
right: 0;
padding-top: 20px;
padding-right: 20px;
z-index: 1000;
}
div.hf-menu a {
font-size: 14px;
letter-spacing: 0.3px;
text-transform: uppercase;
color: white;
-webkit-font-smoothing: antialiased;
background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);
padding: 10px 16px 6px 16px;
border-radius: 3px;
margin-left: 12px;
position: relative;
}
div.hf-menu a:active {
top: 1px;
}
@media (min-width: 768px) and (max-width: 1860px) {
.wy-breadcrumbs {
margin-top: 32px;
}
}
@media (max-width: 768px) {
div.hf-menu {
display: none;
}
}
|
tokenizers/docs/source/_static/css/huggingface.css/0
|
{
"file_path": "tokenizers/docs/source/_static/css/huggingface.css",
"repo_id": "tokenizers",
"token_count": 2708
}
| 260
|
Training from memory
----------------------------------------------------------------------------------------------------
In the `Quicktour <quicktour>`__, we saw how to build and train a tokenizer using text files,
but we can actually use any Python Iterator. In this section we'll see a few different ways of
training our tokenizer.
For all the examples listed below, we'll use the same :class:`~tokenizers.Tokenizer` and
:class:`~tokenizers.trainers.Trainer`, built as following:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START init_tokenizer_trainer
:end-before: END init_tokenizer_trainer
:dedent: 8
This tokenizer is based on the :class:`~tokenizers.models.Unigram` model. It takes care of
normalizing the input using the NFKC Unicode normalization method, and uses a
:class:`~tokenizers.pre_tokenizers.ByteLevel` pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check `here <components>`__.
The most basic way
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As you probably guessed already, the easiest way to train our tokenizer is by using a :obj:`List`:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_basic
:end-before: END train_basic
:dedent: 8
Easy, right? You can use anything working as an iterator here, be it a :obj:`List`, :obj:`Tuple`,
or a :obj:`np.Array`. Anything works as long as it provides strings.
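In essence, that call looks like the following sketch (the example data here is an
assumption, not the snippet included above):

.. code-block:: python

    data = ["A first sentence", "Another sentence", "And a last one"]
    tokenizer.train_from_iterator(data, trainer=trainer)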
Using the 🤗 Datasets library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An awesome way to access one of the many datasets that exist out there is by using the 🤗 Datasets
library. For more information about it, you should check
`the official documentation here <https://huggingface.co/docs/datasets/>`__.
Let's start by loading our dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START load_dataset
:end-before: END load_dataset
:dedent: 8
The next step is to build an iterator over this dataset. The easiest way to do this is probably by
using a generator:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START def_batch_iterator
:end-before: END def_batch_iterator
:dedent: 8
As you can see here, for improved efficiency we can provide batches of examples to
train on, instead of iterating over them one by one. By doing so, we can expect
performance very similar to what we got while training directly from files.
With our iterator ready, we just need to launch the training. In order to improve the look of our
progress bars, we can specify the total length of the dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_datasets
:end-before: END train_datasets
:dedent: 8
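The call itself boils down to something like this (a sketch, with ``batch_iterator``
and ``dataset`` as defined above):

.. code-block:: python

    tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))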
And that's it!
Using gzip files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since gzip files in Python can be used as iterators, it is extremely simple to train on such files:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START single_gzip
:end-before: END single_gzip
:dedent: 8
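In plain Python this is essentially the following (a sketch; the file name is an
assumption):

.. code-block:: python

    import gzip

    with gzip.open("data.txt.gz", "rt") as f:
        tokenizer.train_from_iterator(f, trainer=trainer)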
Now if we wanted to train from multiple gzip files, it wouldn't be much harder:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START multi_gzip
:end-before: END multi_gzip
:dedent: 8
And voilà!
|
tokenizers/docs/source/tutorials/python/training_from_memory.rst/0
|
{
"file_path": "tokenizers/docs/source/tutorials/python/training_from_memory.rst",
"repo_id": "tokenizers",
"token_count": 1149
}
| 261
|
[package]
name = "unstable_wasm"
version = "0.1.0"
authors = ["Nicolas Patry"]
edition = "2018"
[lib]
crate-type = ["cdylib", "rlib"]
[features]
default = ["console_error_panic_hook"]
[dependencies]
wasm-bindgen = "0.2.63"
# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them with `console.error`. This is great for development, but requires
# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
# code size when deploying.
console_error_panic_hook = { version = "0.1.6", optional = true }
# `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size
# compared to the default allocator's ~10K. It is slower than the default
# allocator, however.
#
# Unfortunately, `wee_alloc` requires nightly Rust when targeting wasm for now.
wee_alloc = { version = "0.4.5", optional = true }
tokenizers = { path = "../../", default-features=false, features = ["unstable_wasm"]}
[dev-dependencies]
wasm-bindgen-test = "0.3.13"
[profile.release]
# Tell `rustc` to optimize for small code size.
opt-level = "s"
|
tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml/0
|
{
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml",
"repo_id": "tokenizers",
"token_count": 364
}
| 262
|
const CopyWebpackPlugin = require("copy-webpack-plugin");
const path = require('path');
module.exports = {
entry: "./bootstrap.js",
output: {
path: path.resolve(__dirname, "dist"),
filename: "bootstrap.js",
},
mode: "development",
plugins: [
new CopyWebpackPlugin(['index.html'])
],
};
|
tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js/0
|
{
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js",
"repo_id": "tokenizers",
"token_count": 114
}
| 263
|
//! Popular tokenizer models.
pub mod bpe;
pub mod unigram;
pub mod wordlevel;
pub mod wordpiece;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::models::bpe::{BpeTrainer, BPE};
use crate::models::unigram::{Unigram, UnigramTrainer};
use crate::models::wordlevel::{WordLevel, WordLevelTrainer};
use crate::models::wordpiece::{WordPiece, WordPieceTrainer};
use crate::{AddedToken, Model, Result, Token, Trainer};
/// Wraps a vocab mapping (ID -> token) to a struct that will be serialized in order
/// of token ID, smallest to largest.
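/// For example (illustrative), a `vocab_r` of `{0 => "Hi", 2 => "There"}` serializes
/// to `{"Hi":0,"There":2}` and logs a warning about the hole at index 1
/// (see `incomplete_ordered_vocab` in the tests below).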
struct OrderedVocabIter<'a> {
vocab_r: &'a HashMap<u32, String>,
}
impl<'a> OrderedVocabIter<'a> {
fn new(vocab_r: &'a HashMap<u32, String>) -> Self {
Self { vocab_r }
}
}
impl<'a> Serialize for OrderedVocabIter<'a> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// There could be holes so max + 1 is more correct than vocab_r.len()
let mut holes = vec![];
let result = if let Some(max) = self.vocab_r.iter().map(|(key, _)| key).max() {
let iter = (0..*max + 1).filter_map(|i| {
if let Some(token) = self.vocab_r.get(&i) {
Some((token, i))
} else {
holes.push(i);
None
}
});
serializer.collect_map(iter)
} else {
serializer.collect_map(std::iter::empty::<(&str, u32)>())
};
if !holes.is_empty() {
warn!("The OrderedVocab you are attempting to save contains holes for indices {:?}, your vocabulary could be corrupted !", holes);
println!("The OrderedVocab you are attempting to save contains holes for indices {:?}, your vocabulary could be corrupted !", holes);
}
result
}
}
#[derive(Serialize, Debug, PartialEq, Clone)]
#[serde(untagged)]
pub enum ModelWrapper {
BPE(BPE),
// WordPiece must stay before WordLevel here for deserialization (for retrocompatibility
// with the versions not including the "type"), since WordLevel is a subset of WordPiece
WordPiece(WordPiece),
WordLevel(WordLevel),
Unigram(Unigram),
}
impl<'de> Deserialize<'de> for ModelWrapper {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
pub struct Tagged {
#[serde(rename = "type")]
variant: EnumType,
#[serde(flatten)]
rest: serde_json::Value,
}
#[derive(Deserialize)]
pub enum EnumType {
BPE,
WordPiece,
WordLevel,
Unigram,
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum ModelHelper {
Tagged(Tagged),
Legacy(serde_json::Value),
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum ModelUntagged {
BPE(BPE),
// WordPiece must stay before WordLevel here for deserialization (for retrocompatibility
// with the versions not including the "type"), since WordLevel is a subset of WordPiece
WordPiece(WordPiece),
WordLevel(WordLevel),
Unigram(Unigram),
}
let helper = ModelHelper::deserialize(deserializer)?;
Ok(match helper {
ModelHelper::Tagged(model) => match model.variant {
EnumType::BPE => ModelWrapper::BPE(
serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?,
),
EnumType::WordPiece => ModelWrapper::WordPiece(
serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?,
),
EnumType::WordLevel => ModelWrapper::WordLevel(
serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?,
),
EnumType::Unigram => ModelWrapper::Unigram(
serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?,
),
},
ModelHelper::Legacy(value) => {
let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
match untagged {
ModelUntagged::BPE(bpe) => ModelWrapper::BPE(bpe),
ModelUntagged::WordPiece(bpe) => ModelWrapper::WordPiece(bpe),
ModelUntagged::WordLevel(bpe) => ModelWrapper::WordLevel(bpe),
ModelUntagged::Unigram(bpe) => ModelWrapper::Unigram(bpe),
}
}
})
}
}
impl_enum_from!(WordLevel, ModelWrapper, WordLevel);
impl_enum_from!(WordPiece, ModelWrapper, WordPiece);
impl_enum_from!(BPE, ModelWrapper, BPE);
impl_enum_from!(Unigram, ModelWrapper, Unigram);
impl Model for ModelWrapper {
type Trainer = TrainerWrapper;
fn tokenize(&self, tokens: &str) -> Result<Vec<Token>> {
match self {
Self::WordLevel(t) => t.tokenize(tokens),
Self::WordPiece(t) => t.tokenize(tokens),
Self::BPE(t) => t.tokenize(tokens),
Self::Unigram(t) => t.tokenize(tokens),
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
match self {
Self::WordLevel(t) => t.token_to_id(token),
Self::WordPiece(t) => t.token_to_id(token),
Self::BPE(t) => t.token_to_id(token),
Self::Unigram(t) => t.token_to_id(token),
}
}
fn id_to_token(&self, id: u32) -> Option<String> {
match self {
Self::WordLevel(t) => t.id_to_token(id),
Self::WordPiece(t) => t.id_to_token(id),
Self::BPE(t) => t.id_to_token(id),
Self::Unigram(t) => t.id_to_token(id),
}
}
fn get_vocab(&self) -> HashMap<String, u32> {
match self {
Self::WordLevel(t) => t.get_vocab(),
Self::WordPiece(t) => t.get_vocab(),
Self::BPE(t) => t.get_vocab(),
Self::Unigram(t) => t.get_vocab(),
}
}
fn get_vocab_size(&self) -> usize {
match self {
Self::WordLevel(t) => t.get_vocab_size(),
Self::WordPiece(t) => t.get_vocab_size(),
Self::BPE(t) => t.get_vocab_size(),
Self::Unigram(t) => t.get_vocab_size(),
}
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
match self {
Self::WordLevel(t) => t.save(folder, name),
Self::WordPiece(t) => t.save(folder, name),
Self::BPE(t) => t.save(folder, name),
Self::Unigram(t) => t.save(folder, name),
}
}
fn get_trainer(&self) -> Self::Trainer {
match self {
Self::WordLevel(t) => t.get_trainer().into(),
Self::WordPiece(t) => t.get_trainer().into(),
Self::BPE(t) => t.get_trainer().into(),
Self::Unigram(t) => t.get_trainer().into(),
}
}
}
#[derive(Clone, Serialize, Deserialize)]
pub enum TrainerWrapper {
BpeTrainer(BpeTrainer),
WordPieceTrainer(WordPieceTrainer),
WordLevelTrainer(WordLevelTrainer),
UnigramTrainer(UnigramTrainer),
}
impl Trainer for TrainerWrapper {
type Model = ModelWrapper;
fn should_show_progress(&self) -> bool {
match self {
Self::BpeTrainer(bpe) => bpe.should_show_progress(),
Self::WordPieceTrainer(wpt) => wpt.should_show_progress(),
Self::WordLevelTrainer(wpt) => wpt.should_show_progress(),
Self::UnigramTrainer(wpt) => wpt.should_show_progress(),
}
}
fn train(&self, model: &mut ModelWrapper) -> Result<Vec<AddedToken>> {
match self {
Self::BpeTrainer(t) => match model {
ModelWrapper::BPE(bpe) => t.train(bpe),
_ => Err("BpeTrainer can only train a BPE".into()),
},
Self::WordPieceTrainer(t) => match model {
ModelWrapper::WordPiece(wp) => t.train(wp),
_ => Err("WordPieceTrainer can only train a WordPiece".into()),
},
Self::WordLevelTrainer(t) => match model {
ModelWrapper::WordLevel(wl) => t.train(wl),
_ => Err("WordLevelTrainer can only train a WordLevel".into()),
},
Self::UnigramTrainer(t) => match model {
ModelWrapper::Unigram(u) => t.train(u),
_ => Err("UnigramTrainer can only train a Unigram".into()),
},
}
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
match self {
Self::BpeTrainer(bpe) => bpe.feed(iterator, process),
Self::WordPieceTrainer(wpt) => wpt.feed(iterator, process),
Self::WordLevelTrainer(wpt) => wpt.feed(iterator, process),
Self::UnigramTrainer(wpt) => wpt.feed(iterator, process),
}
}
}
impl_enum_from!(BpeTrainer, TrainerWrapper, BpeTrainer);
impl_enum_from!(WordPieceTrainer, TrainerWrapper, WordPieceTrainer);
impl_enum_from!(UnigramTrainer, TrainerWrapper, UnigramTrainer);
impl_enum_from!(WordLevelTrainer, TrainerWrapper, WordLevelTrainer);
#[cfg(test)]
mod tests {
use super::*;
use crate::models::bpe::{BpeBuilder, Vocab};
#[test]
fn trainer_wrapper_train_model_wrapper() {
let trainer = TrainerWrapper::BpeTrainer(BpeTrainer::default());
let mut model = ModelWrapper::Unigram(Unigram::default());
let result = trainer.train(&mut model);
assert!(result.is_err());
}
#[test]
fn incomplete_ordered_vocab() {
let vocab_r: HashMap<u32, String> =
HashMap::from([(0, "Hi".to_string()), (2, "There".to_string())]);
let ordered = OrderedVocabIter::new(&vocab_r);
let serialized = serde_json::to_string(&ordered).unwrap();
assert_eq!(serialized, "{\"Hi\":0,\"There\":2}");
}
#[test]
fn serialization() {
let vocab: Vocab = [
("<unk>".into(), 0),
("a".into(), 1),
("b".into(), 2),
("ab".into(), 3),
]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![("a".to_string(), "b".to_string())])
.unk_token("<unk>".to_string())
.ignore_merges(true)
.build()
.unwrap();
let model = ModelWrapper::BPE(bpe);
let legacy = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b"]}"#;
let legacy = serde_json::from_str(legacy).unwrap();
assert_eq!(model, legacy);
let data = serde_json::to_string(&model).unwrap();
assert_eq!(
data,
r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":[["a","b"]]}"#
);
let reconstructed = serde_json::from_str(&data).unwrap();
assert_eq!(model, reconstructed);
// Legacy check, type is not necessary.
let legacy = r#"{"dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b"]}"#;
let reconstructed = serde_json::from_str(legacy).unwrap();
assert_eq!(model, reconstructed);
let invalid = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b c"]}"#;
let reconstructed: std::result::Result<ModelWrapper, serde_json::Error> =
serde_json::from_str(invalid);
match reconstructed {
Err(err) => assert_eq!(err.to_string(), "Merges text file invalid at line 1"),
_ => panic!("Expected an error here"),
}
}
}
|
tokenizers/tokenizers/src/models/mod.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/models/mod.rs",
"repo_id": "tokenizers",
"token_count": 6101
}
| 264
|
use crate::tokenizer::{NormalizedString, Normalizer, Result};
pub use spm_precompiled::Precompiled;
use std::cmp::Ordering;
use unicode_segmentation::UnicodeSegmentation;
fn replace(transformations: &mut Vec<(char, isize)>, old_part: &str, new_part: &str) {
let old_count = old_part.chars().count() as isize;
let new_count = new_part.chars().count() as isize;
let diff = new_count - old_count;
// If we are just replacing characters, all changes should be == 0
transformations.extend(new_part.chars().map(|c| (c, 0)));
match diff.cmp(&0) {
    // If we are adding some characters, the last DIFF characters should be == 1
Ordering::Greater => {
transformations
.iter_mut()
.rev()
.take(diff as usize)
.for_each(|(_, cs)| *cs = 1);
}
// If we are removing some characters, the last one should include the diff
Ordering::Less => {
if let Some((_, cs)) = transformations.last_mut() {
*cs += diff;
}
}
_ => {}
}
}
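// Worked example (illustrative): replacing "™" (1 char) with "TM" (2 chars) has
// diff == +1, so the pushed entries become [('T', 0), ('M', 1)]; a following
// removal of "\x1e" (diff == -1) folds into the last entry, yielding
// [('T', 0), ('M', 0)]. See `expansion_followed_by_removal` in the tests below.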
impl Normalizer for Precompiled {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
let mut transformations = Vec::with_capacity(normalized.get().len());
// Future reader. From @Narsil.
// Yes, this is weird,
// Yes, this seems broken
// No, I don't know why Google did this.
// If you question this code, check this normalizer against
// XNLI database (all languages) with Unigram model against
// Mbart, XLMRoberta *AND* Marian. If you don't get 100% or
// break a single test.
// You don't pass.
let mut modified = false;
normalized.get().graphemes(true).for_each(|grapheme| {
if grapheme.len() < 6 {
if let Some(norm) = self.transform(grapheme) {
modified = true;
replace(&mut transformations, grapheme, norm);
return;
}
}
for (char_index, c) in grapheme.char_indices() {
let part = &grapheme[char_index..char_index + c.len_utf8()];
if let Some(norm) = self.transform(part) {
modified = true;
replace(&mut transformations, part, norm);
} else {
transformations.push((c, 0));
}
}
});
if modified {
normalized.transform(transformations, 0);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn expansion_followed_by_removal() {
        // Simulate transformations from "™\x1eg" to "TMg"
        let mut transformations = vec![];
        let mut n = NormalizedString::from("™\x1eg");
replace(&mut transformations, "β’", "TM");
replace(&mut transformations, "\x1e", "");
transformations.push(('g', 0));
n.transform(transformations, 0);
assert_eq!(n.get(), "TMg");
}
}
|
tokenizers/tokenizers/src/normalizers/precompiled.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/normalizers/precompiled.rs",
"repo_id": "tokenizers",
"token_count": 1432
}
| 265
|
use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script};
use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct UnicodeScripts;
impl UnicodeScripts {
pub fn new() -> Self {
Self {}
}
}
impl Default for UnicodeScripts {
fn default() -> Self {
Self::new()
}
}
// This code exists in the Unigram default IsValidSentencePiece.
// It could be integrated directly within `get_script`, but I
// think it's kind of tricky to see those modifications later.
// I am guessing release mode will optimize this away anyway.
fn fixed_script(c: char) -> Script {
let raw_script = get_script(c);
if c as u32 == 0x30FC {
Script::Han
} else if c == ' ' {
Script::Any
} else {
match raw_script {
Script::Hiragana => Script::Han,
Script::Katakana => Script::Han,
script => script,
}
}
}
impl PreTokenizer for UnicodeScripts {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
let mut last_script = None;
let mut offset = 0;
let mut ranges: Vec<_> = normalized
.get()
.chars()
.filter_map(|c| {
let script = Some(fixed_script(c));
let result = if script != Some(Script::Any)
&& last_script != Some(Script::Any)
&& last_script != script
{
Some(offset)
} else {
None
};
offset += c.len_utf8();
if script != Some(Script::Any) {
last_script = script;
}
result
})
.collect();
ranges.push(normalized.get().len());
Ok(ranges
.windows(2)
.map(|item| {
normalized
.slice(Range::Normalized(item[0]..item[1]))
.expect("NormalizedString bad split")
})
.collect::<Vec<_>>())
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::OffsetReferential;
use crate::OffsetType;
#[test]
fn basic() {
let pretok = UnicodeScripts {};
        let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
            vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
            vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
}
#[test]
fn spaces_are_included_in_every_script() {
let pretok = UnicodeScripts {};
        let mut pretokenized = PreTokenizedString::from("Apples are りんご 林檎");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
            vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
            vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
}
#[test]
fn test_unicode_script() {
        assert_eq!(Script::Han, fixed_script('京'));
        assert_eq!(Script::Han, fixed_script('太'));
        assert_eq!(Script::Han, fixed_script('い'));
        assert_eq!(Script::Han, fixed_script('グ'));
        assert_eq!(Script::Han, fixed_script('ー'));
assert_eq!(Script::Latin, fixed_script('a'));
assert_eq!(Script::Latin, fixed_script('A'));
assert_eq!(Script::Common, fixed_script('0'));
assert_eq!(Script::Common, fixed_script('$'));
assert_eq!(Script::Common, fixed_script('@'));
assert_eq!(Script::Common, fixed_script('-'));
assert_eq!(Script::Any, fixed_script(' '));
}
}
|
tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 2584
}
| 266
|
use crate::tokenizer::pattern::Pattern;
use crate::Offsets;
use fancy_regex::Regex;
use std::error::Error;
#[derive(Debug)]
pub struct SysRegex {
regex: Regex,
}
impl SysRegex {
pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> Matches<'r, 't> {
Matches(self.regex.find_iter(inside))
}
pub fn new(regex_str: &str) -> Result<Self, Box<dyn Error + Send + Sync + 'static>> {
Ok(Self {
regex: Regex::new(regex_str)?,
})
}
}
pub struct Matches<'r, 't>(fancy_regex::Matches<'r, 't>);
impl<'r, 't> Iterator for Matches<'r, 't> {
type Item = (usize, usize);
fn next(&mut self) -> Option<Self::Item> {
match self.0.next() {
Some(Ok(mat)) => Some((mat.start(), mat.end())),
// stop if an error is encountered
None | Some(Err(_)) => None,
}
}
}
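// Illustrative example: with the pattern `\d+`, `find_matches("ab12cd")` returns
// [((0, 2), false), ((2, 4), true), ((4, 6), false)]; unmatched spans are tagged
// `false` and matched spans `true`.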
impl Pattern for &Regex {
fn find_matches(
&self,
inside: &str,
) -> Result<Vec<(Offsets, bool)>, Box<dyn Error + Send + Sync + 'static>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for match_ in self.find_iter(inside) {
let match_ = match_?;
let start = match_.start();
let end = match_.end();
if prev != start {
splits.push(((prev, start), false));
}
splits.push(((start, end), true));
prev = end;
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
|
tokenizers/tokenizers/src/utils/fancy.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/utils/fancy.rs",
"repo_id": "tokenizers",
"token_count": 831
}
| 267